relative_path | section | filename | text
---|---|---|---|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library | library | onnx | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
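# Example usage (a sketch; the model path and the `batch` dictionary below are hypothetical):
#
#   loader = OnnxLoader()
#   model = loader.load("/models/model.onnx")
#   runner = OnnxRunner()
#   with runner.init_inference(model) as session:
#       outputs = session({name: batch[name] for name in model.inputs})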
|
PyTorch/Classification/ConvNets/resnext101-32x4d/training/FP32 | FP32 | DGX1V_resnext101-32x4d_FP32_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision FP32 --mode convergence --platform DGX1V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
PyTorch/LanguageModeling/BART/scripts | scripts | run_eval_summarization | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
INIT_CKPT=${1}
if [ ! -f "$INIT_CKPT" ]; then
echo "$INIT_CKPT does not exist. Cannot run inference without a valid checkpoint"
exit -1
fi
PRED_BS=${2:-96}
NUM_GPU=${3:-8}
PRECISION=${4:-fp16}
EVAL_BEAMS=${5:-4}
MAX_SOURCE_LEN=${6:-1024}
MAX_TARGET_LEN=${7:-142}
DATA_DIR=${8:-data/cnn_dm}
CONFIG_PATH=${9:-"configs/config.json"}
PRELN=${10:-true}
printf -v TAG "bart_pyt_inference"
DATESTAMP=`date +'%y%m%d%H%M%S'`
RESULTS_DIR=${RESULTS_DIR:-results/${TAG}_${DATESTAMP}}
mkdir -p $RESULTS_DIR
if [ "$PRECISION" = "fp16" ] ; then
echo "fp16 activated!"
USE_FP16="--fp16"
elif [ "$PRECISION" = "bf16" ] ; then
echo "bf16 activated!"
USE_FP16="--bf16"
else
echo "fp32/tf32 activated!"
USE_FP16=""
fi
if [ "$PRELN" = "true" ] ; then
echo "Use PreLN"
USE_FP16="--pre_ln $USE_FP16"
else
echo "Use PostLN"
fi
python -m torch.distributed.launch --nproc_per_node=$NUM_GPU run_eval.py \
--task summarization \
--bs ${PRED_BS} --max_source_length=${MAX_SOURCE_LEN} --max_target_length=${MAX_TARGET_LEN} \
--eval_max_gen_length=${MAX_TARGET_LEN} --eval_beams=${EVAL_BEAMS} ${USE_FP16} \
${INIT_CKPT} ${CONFIG_PATH} ${DATA_DIR} ${RESULTS_DIR} |& tee -a ${RESULTS_DIR}/joblog.log
|
PyTorch/Detection/Efficientdet/scripts/D0 | D0 | validation_FP32_V100-32G | #!/bin/bash
rm -rf *.json
python -u -m bind_launch --nproc_per_node=${NUM_PROC:-1} validate.py '/workspace/object_detection/datasets/coco/' --model efficientdet_d0 -b ${BATCH_SIZE:-8} --torchscript --use-ema --checkpoint ${CKPT_PATH:-/checkpoints/Effdet_B0.pth} |
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/cc/kernels/ampere | ampere | dot_based_interact_ampere | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef KERNEL_DOT_BASED_INTERACT_AMPERE_H_
#define KERNEL_DOT_BASED_INTERACT_AMPERE_H_
#include <cuda_runtime.h>  // for cudaStream_t
void dotBasedInteractAmpereF16Fwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream);
void dotBasedInteractAmpereF16Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream);
void dotBasedInteractAmpereTF32Fwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream);
void dotBasedInteractAmpereTF32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream);
#endif //KERNEL_DOT_BASED_INTERACT_AMPERE_H_
|
PyTorch/Translation/Transformer/fairseq | fairseq | log_helper | import os
import atexit
import time
import itertools
from collections import OrderedDict
import dllogger
from dllogger import Backend, JSONStreamBackend
from tensorboardX import SummaryWriter
import torch
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.updated = False
self.avg = 0
self.sum = 0
self.count = 0
def update(self, value):
self.updated = True
if isinstance(value, (tuple, list)):
val = value[0]
n = value[1]
else:
val = value
n = 1
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
@property
def value(self):
return self.avg
class PerformanceMeter():
def __init__(self):
self.reset()
def reset(self):
self.updated = False
torch.cuda.synchronize()
self.start = time.time()
self.n = 0
def update(self, val=1):
self.updated = True
self.n += val
@property
def value(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
torch.cuda.synchronize()
return time.time() - self.start
METRIC = {'average': AverageMeter, 'performance': PerformanceMeter}
class AggregatorBackend(Backend):
def __init__(self, verbosity, agg_dict):
super().__init__(verbosity=verbosity)
agg_dict = OrderedDict({k: v if isinstance(v, (tuple, list)) else (v,) for k, v in agg_dict.items()})
self.metrics = OrderedDict({k: [METRIC[x]() for x in v] for k, v in agg_dict.items()})
self.metrics.flushed = True
self.step = 0
self.epoch = 0
torch.cuda.synchronize()
self.start_time = time.time()
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def _reset_perf_meter(self, name):
for agg in self.metrics[name]:
if isinstance(agg, PerformanceMeter):
agg.reset()
def reset_perf_meters(self):
for name in self.metrics.keys():
self._reset_perf_meter(name)
def log(self, timestamp, elapsedtime, step, data):
self.step = step
if 'epoch' in data.keys():
self.epoch = data['epoch']
for k, v in data.items():
if k not in self.metrics.keys():
continue
self.metrics.flushed = False
for ag in self.metrics[k]:
ag.update(v)
def flush(self):
if self.metrics.flushed:
return
result_string = 'Transformer | epoch {} | step {} |'.format(self.epoch, self.step)
for name, aggregators in self.metrics.items():
for agg in aggregators:
if not agg.updated:
continue
if isinstance(agg, AverageMeter):
_name = 'avg ' + name
elif isinstance(agg, PerformanceMeter):
_name = name + '/s'
result_string += _name + ' {:.3f} |'.format(agg.value)
agg.reset()
torch.cuda.synchronize()
result_string += 'walltime {:.3f} |'.format(time.time() - self.start_time)
self.metrics.flushed = True
print(result_string)
class TensorBoardBackend(Backend):
def __init__(self, verbosity, log_dir):
super().__init__(verbosity=verbosity)
self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'TB_summary'),
flush_secs=120,
max_queue=200
)
atexit.register(self.summary_writer.close)
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def log(self, timestamp, elapsedtime, step, data):
if not isinstance(step, int):
return
for k, v in data.items():
self.summary_writer.add_scalar(k, v, step)
def flush(self):
pass
def setup_logger(args):
aggregator_dict = OrderedDict([
('loss', 'average'),
('weighted_loss', 'average'),
('tokens', ('average', 'performance')),
('updates', 'performance'),
('gnorm', 'average')
])
os.makedirs(args.save_dir, exist_ok=True)
log_path = os.path.join(args.save_dir, args.stat_file)
if os.path.exists(log_path):
for i in itertools.count():
s_fname = args.stat_file.split('.')
fname = '.'.join(s_fname[:-1]) + f'_{i}.' + s_fname[-1] if len(s_fname) > 1 else args.stat_file + f'.{i}'
log_path = os.path.join(args.save_dir, fname)
if not os.path.exists(log_path):
break
if not args.distributed_world_size > 1 or args.distributed_rank == 0:
dllogger.init(backends=[JSONStreamBackend(verbosity=1, filename=log_path),
AggregatorBackend(verbosity=0, agg_dict=aggregator_dict),
TensorBoardBackend(verbosity=1, log_dir=args.save_dir)])
else:
dllogger.init(backends=[])
for k, v in vars(args).items():
dllogger.log(step='PARAMETER', data={k: v}, verbosity=0)
container_setup_info = get_framework_env_vars()
dllogger.log(step='PARAMETER', data=container_setup_info, verbosity=0)
dllogger.metadata('loss', {'unit': 'nat', 'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN'})
dllogger.metadata('val_loss', {'unit': 'nat', 'GOAL': 'MINIMIZE', 'STAGE': 'VAL'})
dllogger.metadata('speed', {'unit': 'tokens/s', 'format': ':.3f', 'GOAL': 'MAXIMIZE', 'STAGE': 'TRAIN'})
dllogger.metadata('accuracy', {'unit': 'bleu', 'format': ':.2f', 'GOAL': 'MAXIMIZE', 'STAGE': 'VAL'})
def get_framework_env_vars():
return {
'NVIDIA_PYTORCH_VERSION': os.environ.get('NVIDIA_PYTORCH_VERSION'),
'PYTORCH_VERSION': os.environ.get('PYTORCH_VERSION'),
'CUBLAS_VERSION': os.environ.get('CUBLAS_VERSION'),
'NCCL_VERSION': os.environ.get('NCCL_VERSION'),
'CUDA_DRIVER_VERSION': os.environ.get('CUDA_DRIVER_VERSION'),
'CUDNN_VERSION': os.environ.get('CUDNN_VERSION'),
'CUDA_VERSION': os.environ.get('CUDA_VERSION'),
'NVIDIA_PIPELINE_ID': os.environ.get('NVIDIA_PIPELINE_ID'),
'NVIDIA_BUILD_ID': os.environ.get('NVIDIA_BUILD_ID'),
'NVIDIA_TF32_OVERRIDE': os.environ.get('NVIDIA_TF32_OVERRIDE'),
}
def reset_perf_meters():
for backend in dllogger.GLOBAL_LOGGER.backends:
if isinstance(backend, AggregatorBackend):
backend.reset_perf_meters()
|
. | DeepLearningExamples | hubconf | import os
import sys
from PyTorch.Detection.SSD.ssd import nvidia_ssd, nvidia_ssd_processing_utils
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Detection/SSD'))
from PyTorch.Classification.ConvNets.image_classification.models import resnet50 as nvidia_resnet50
from PyTorch.Classification.ConvNets.image_classification.models import resnext101_32x4d as nvidia_resnext101_32x4d
from PyTorch.Classification.ConvNets.image_classification.models import se_resnext101_32x4d as nvidia_se_resnext101_32x4d
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_b0 as nvidia_efficientnet_b0
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_b4 as nvidia_efficientnet_b4
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_widese_b0 as nvidia_efficientnet_widese_b0
from PyTorch.Classification.ConvNets.image_classification.models import efficientnet_widese_b4 as nvidia_efficientnet_widese_b4
from PyTorch.Classification.ConvNets.image_classification.models import nvidia_convnets_processing_utils
from PyTorch.Classification.ConvNets.image_classification.models import resnext101_32x4d as nvidia_resneXt
from PyTorch.Classification.ConvNets.image_classification.models import nvidia_efficientnet
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Classification/ConvNets/image_classification'))
from PyTorch.Classification.GPUNet.configs.gpunet_torchhub import nvidia_gpunet
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Classification/GPUNet/configs'))
from PyTorch.SpeechSynthesis.Tacotron2.tacotron2 import nvidia_tacotron2
from PyTorch.SpeechSynthesis.Tacotron2.tacotron2 import nvidia_tts_utils
from PyTorch.SpeechSynthesis.Tacotron2.waveglow import nvidia_waveglow
sys.path.append(os.path.join(sys.path[0], 'PyTorch/SpeechSynthesis/Tacotron2'))
from PyTorch.SpeechSynthesis.HiFiGAN.fastpitch import nvidia_fastpitch
from PyTorch.SpeechSynthesis.HiFiGAN.fastpitch import nvidia_textprocessing_utils
from PyTorch.SpeechSynthesis.HiFiGAN.hifigan import nvidia_hifigan
sys.path.append(os.path.join(sys.path[0], 'PyTorch/SpeechSynthesis/HiFiGAN'))
from PyTorch.Forecasting.TFT.tft_torchhub import nvidia_tft, nvidia_tft_data_utils
sys.path.append(os.path.join(sys.path[0], 'PyTorch/Forecasting/TFT'))
|
TensorFlow/Recommendation/VAE-CF/vae/models | models | train | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
import scipy.sparse as sparse
import tensorflow as tf
import numpy as np
import time
import logging
import dllogger
from sklearn.preprocessing import normalize
from collections import defaultdict
from vae.models.vae import _VAEGraph, TRAINING, QUERY, VALIDATION
from vae.utils.round import round_8
LOG = logging.getLogger("VAE")
class VAE:
def __init__(self,
train_data,
encoder_dims,
decoder_dims=None,
batch_size_train=500,
batch_size_validation=2000,
lam=3e-2,
lr=1e-3,
beta1=0.9,
beta2=0.999,
total_anneal_steps=200000,
anneal_cap=0.2,
xla=True,
activation='tanh',
checkpoint_dir=None,
trace=False,
top_results=100):
if decoder_dims is None:
decoder_dims = encoder_dims[::-1]
for i in encoder_dims + decoder_dims + [batch_size_train, batch_size_validation]:
if i != round_8(i):
raise ValueError("all dims and batch sizes should be divisible by 8")
self.metrics_history = None
self.batch_size_train = batch_size_train
self.batch_size_validation = batch_size_validation
self.lam = lam
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.xla = xla
self.total_anneal_steps = total_anneal_steps
self.anneal_cap = anneal_cap
self.activation = activation
self.encoder_dims = encoder_dims
self.decoder_dims = decoder_dims
self.trace = trace
self.top_results = top_results
self.checkpoint_dir = checkpoint_dir if hvd.rank() == 0 else None
self._create_dataset(train_data,
batch_size_train,
encoder_dims)
self._setup_model()
self.metrics_history = defaultdict(lambda: [])
self.time_elapsed_training_history = []
self.time_elapsed_validation_history = []
self.training_throughputs = []
self.inference_throughputs = []
def _create_dataset(self, train_data, batch_size_train, encoder_dims):
generator, self.n_batch_per_train = self.batch_iterator(train_data,
None,
batch_size_train,
thread_idx=hvd.rank(),
thread_num=hvd.size())
dataset = tf.data.Dataset \
.from_generator(generator, output_types=(tf.int64, tf.float32)) \
.map(lambda i, v: tf.SparseTensor(i, v, (batch_size_train, encoder_dims[0]))) \
.prefetch(10)
self.iter = dataset.make_initializable_iterator()
self.inputs_train = self.iter.get_next()
def _setup_model(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if self.trace:
hooks.append(tf.train.ProfilerHook(save_steps=1, output_dir='.'))
if self.xla:
LOG.info('Enabling XLA')
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
else:
LOG.info('XLA disabled')
self._build_graph()
self.session = tf.train.MonitoredTrainingSession(config=config,
checkpoint_dir=self.checkpoint_dir,
save_checkpoint_secs=10,
hooks=hooks)
def _build_optimizer(self, loss):
optimizer= tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2)
return hvd.DistributedOptimizer(optimizer).minimize(
loss, global_step=tf.train.get_or_create_global_step())
def close_session(self):
if self.session is not None:
self.session.close()
def batch_iterator(self, data_input, data_true=None, batch_size=500, thread_idx=0, thread_num=1):
training = data_true is None
data_input = normalize(data_input)
indices = np.arange(data_input.shape[0])
global_batch_size = batch_size * hvd.size()
if training:
# crop the data so that each gpu has the same number of batches
stop = data_input.shape[0] // global_batch_size * global_batch_size
LOG.info('Cropping each epoch from: {} to {} samples'.format(data_input.shape[0], stop))
else:
stop = data_input.shape[0]
def generator():
data_in = data_input
epoch = 0
while True:
if training:
# deterministic shuffle necessary for multigpu
np.random.seed(epoch)
np.random.shuffle(indices)
data_in = data_in[indices]
for st_idx in range(thread_idx * batch_size, stop, thread_num * batch_size):
batch = data_in[st_idx:st_idx + batch_size].copy()
batch = batch.tocoo()
idxs = np.stack([batch.row, batch.col], axis=1)
vals = batch.data
if training:
np.random.seed(epoch * thread_num + thread_idx)
nnz = vals.shape[0]
# dropout with keep_prob=0.5
vals *= (2 * np.random.randint(2, size=nnz))
yield (idxs, vals)
else:
yield idxs, vals, data_true[st_idx:st_idx + batch_size]
if not training:
break
epoch += 1
be = thread_idx * batch_size
st = thread_num * batch_size
return generator, int(np.ceil((stop - be) / st))
def _build_graph(self):
self.vae = _VAEGraph(self.encoder_dims, self.decoder_dims, self.activation)
self.inputs_validation = tf.sparse.placeholder(
dtype=tf.float32,
shape=np.array([self.batch_size_validation, self.vae.input_dim], dtype=np.int32))
self.inputs_query = tf.sparse.placeholder(
dtype=tf.float32,
shape=np.array([1, self.vae.input_dim], dtype=np.int32))
self.top_k_validation = self._gen_handlers(mode=VALIDATION)
self.logits_train, self.loss_train, self.optimizer = self._gen_handlers(mode=TRAINING)
self.top_k_query = self._gen_handlers(mode=QUERY)
global_step = tf.train.get_or_create_global_step()
self.increment_global_step = tf.assign(global_step, global_step + 1)
def _gen_handlers(self, mode):
# model input
if mode is TRAINING:
inputs = self.inputs_train
elif mode is VALIDATION:
inputs = self.inputs_validation
elif mode is QUERY:
inputs = self.inputs_query
else:
assert False
if mode is TRAINING:
batch_size = self.batch_size_train
elif mode is VALIDATION:
batch_size = self.batch_size_validation
elif mode is QUERY:
batch_size = 1
else:
assert False
# model output
logits, latent_mean, latent_log_var = self.vae(inputs, mode=mode)
if mode in [VALIDATION, QUERY]:
mask = tf.ones_like(inputs.values) * (-np.inf)
logits = tf.tensor_scatter_nd_update(logits, inputs.indices, mask)
top_k_values, top_k_indices = tf.math.top_k(logits, sorted=True, k=self.top_results)
return top_k_indices
softmax = tf.nn.log_softmax(logits)
anneal = tf.math.minimum(
tf.cast(tf.train.get_or_create_global_step(), tf.float32) /
self.total_anneal_steps, self.anneal_cap)
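# KL annealing: the KL weight grows linearly with the global step and is capped at anneal_cap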
# KL divergence
KL = tf.reduce_mean(
tf.reduce_sum(
(-latent_log_var + tf.exp(latent_log_var) + latent_mean ** 2 - 1)
/ 2,
axis=1))
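# The expression above is the closed-form KL divergence between N(latent_mean, exp(latent_log_var))
# and the standard normal prior: 0.5 * sum(exp(log_var) + mean^2 - 1 - log_var), averaged over the batch.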
# per-user average negative log-likelihood part of loss
ll_loss = -tf.reduce_sum(tf.gather_nd(softmax, inputs.indices)) / batch_size
# regularization part of loss
reg_loss = 2 * tf.reduce_sum(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
loss = ll_loss + self.lam * reg_loss + anneal * KL
train_op = self._build_optimizer(loss)
return logits, ll_loss, train_op
def train(
self,
n_epochs: int,
validation_data_input: sparse.csr_matrix,
validation_data_true: sparse.csr_matrix,
metrics: dict, # Dict[str, matrix -> matrix -> float]
validation_step: int = 10,
):
"""
Train the model
:param n_epochs: number of epochs
:param validation_data_input: validation input matrix of shape users count x items count
:param validation_data_true: validation ground-truth matrix of the same shape
:param metrics: Dictionary of metric names to metric functions
:param validation_step: If it's set to n then validation is run once every n epochs
"""
self.total_time_start = time.time()
self.session.run(self.iter.initializer)
num_workers = hvd.size()
for epoch in range(1, n_epochs + 1):
init_time = time.time()
for i in range(self.n_batch_per_train):
self.session.run(self.optimizer)
batches_per_epoch = i + 1
training_duration = time.time() - init_time
self.time_elapsed_training_history.append(training_duration)
training_throughput = num_workers * batches_per_epoch * self.batch_size_train / training_duration
self.training_throughputs.append(training_throughput)
dllogger.log(data={"train_epoch_time" : training_duration,
"train_throughput" : training_throughput},
step=(epoch,))
if (epoch % validation_step == 0 or epoch == n_epochs) and hvd.rank() == 0:
init_time = time.time()
metrics_scores = self.test(validation_data_input,
validation_data_true,
metrics,
epoch=epoch)
for name, score in metrics_scores.items():
self.metrics_history[name].append(score)
validation_duration = time.time() - init_time
self.time_elapsed_validation_history.append(validation_duration)
dllogger.log(data={"valid_time" : validation_duration},
step=(epoch,))
self.log_metrics(epoch, metrics_scores, n_epochs)
self.total_time = time.time() - self.total_time_start
if hvd.rank() == 0:
self.log_final_stats()
def test(
self,
test_data_input,
test_data_true,
metrics,
epoch=0,
):
"""
Test the performance of the model
:param metrics: Dictionary of metric names to metric functions
"""
metrics_scores = defaultdict(lambda: [])
gen = self.batch_iterator_val(test_data_input, test_data_true)
for idxs, vals, X_true in gen():
inference_begin = time.time()
if self.trace:
pred_val, _ = self.session.run([self.top_k_validation, self.increment_global_step],
feed_dict={self.inputs_validation: (idxs, vals)})
else:
pred_val = self.session.run(self.top_k_validation,
feed_dict={self.inputs_validation: (idxs, vals)})
elapsed = time.time() - inference_begin
pred_val = np.copy(pred_val)
inference_throughput = self.batch_size_validation / elapsed
self.inference_throughputs.append(inference_throughput)
dllogger.log(data={"inference_throughput" : inference_throughput},
step=(epoch,))
for name, metric in metrics.items():
metrics_scores[name].append(metric(X_true, pred_val))
# For some random seeds passed to the data preprocessing script
# the test set might contain samples that have no true items to be predicted.
# At least one such sample is present in about 7% of all possible test sets.
# We decided not to change the preprocessing to remain comparable to the original implementation.
# Therefore we're using the nan-aware mean from numpy to ignore users with no items to be predicted.
return {name: np.nanmean(scores) for name, scores in metrics_scores.items()}
def query(self, indices: np.ndarray):
"""
inference for batch size 1
:param input_data:
:return:
"""
values = np.ones(shape=(1, len(indices)))
values = normalize(values)
values = values.reshape(-1)
res = self.session.run(
self.top_k_query,
feed_dict={self.inputs_query: (indices,
values)})
return res
def _increment_global_step(self):
res = self.session.run(self.increment_global_step)
print('increment global step result: ', res)
def batch_iterator_train(self, data_input):
"""
:return: iterator of consecutive batches and its length
"""
data_input = normalize(data_input)
indices = np.arange(data_input.shape[0])
np.random.shuffle(indices)
data_input = data_input[list(indices)]
nsize, _ = data_input.shape
csize = nsize // self.batch_size_train * self.batch_size_train
def generator():
while True:
for st_idx in range(0, csize, self.batch_size_train):
idxs, vals = self.next_batch(data_input,st_idx, self.batch_size_train)
nnz = vals.shape[0]
vals *= (2 * np.random.randint(2, size=nnz))
yield (idxs, vals)
return generator, int(np.ceil(csize / self.batch_size_train))
def batch_iterator_val(self, data_input, data_true):
"""
:return: iterator of consecutive batches and its length
"""
data_input = normalize(data_input)
nsize, _ = data_input.shape
csize = nsize // self.batch_size_validation * self.batch_size_validation
def generator():
for st_idx in range(0, csize, self.batch_size_validation):
idxs, vals = self.next_batch(data_input, st_idx, self.batch_size_validation)
yield idxs, vals, data_true[st_idx:st_idx + self.batch_size_validation]
return generator
def next_batch(self, data_input, st_idx, batch_size):
batch = data_input[st_idx:st_idx + batch_size].copy()
batch = batch.tocoo()
idxs = np.stack([batch.row, batch.col], axis=1)
vals = batch.data
return idxs,vals
def log_metrics(self, epoch, metrics_scores, n_epochs):
dllogger.log(data=metrics_scores, step=(epoch,))
def log_final_stats(self):
data = {"mean_training_throughput": np.mean(self.training_throughputs[10:]),
"mean_inference_throughput": np.mean(self.inference_throughputs[2:])}
for metric_name, metric_values in self.metrics_history.items():
data["final_" + metric_name] = metric_values[-1]
dllogger.log(data=data, step=tuple())
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/data_loader/datasets | datasets | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .edge_ds import EdgeDS
DATASET_SOURCE = {
"edge_ds": EdgeDS,
}
|
TensorFlow2/Classification/ConvNets/model/blocks | blocks | conv2d_block | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple
from model.layers import get_batch_norm
__all__ = ['conv2d_block']
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_in',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: dict,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
name: Text = None):
"""A conv2d followed by batch norm and an activation."""
batch_norm = get_batch_norm(config.mparams.batch_norm)
bn_momentum = config.mparams.bn_momentum
bn_epsilon = config.mparams.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
'data_format':data_format
}
CONV_KERNEL_INITIALIZER['config']['mode'] = config.mparams.weight_init
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({'filters': conv_filters,
'kernel_initializer': CONV_KERNEL_INITIALIZER})
x = conv2d(**init_kwargs)(inputs)
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
x = batch_norm(axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn')(x)
if activation is not None:
x = tf.keras.layers.Activation(activation,
name=name + '_activation')(x)
return x |
TensorFlow2/LanguageModeling/BERT/scripts | scripts | benchmark_pretraining_lamb_phase2 | #! /bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_phase1=${1:-64}
train_batch_size_phase2=${2:-8}
eval_batch_size=${3:-8}
learning_rate_phase1=${4:-"7.5e-4"}
learning_rate_phase2=${5:-"5e-4"}
precision=${6:-"fp16"}
use_xla=${7:-"true"}
num_gpus=${8:-2}
warmup_steps_phase1=${9:-"2000"}
warmup_steps_phase2=${10:-"200"}
train_steps=${11:-7820}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-128}
num_accumulation_steps_phase2=${14:-512}
bert_model=${15:-"large"}
DATA_DIR=${DATA_DIR:-data}
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=${RESULTS_DIR:-/results}
if [ "$bert_model" = "large" ] ; then
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/bert_config.json
else
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12/bert_config.json
fi
echo "Container nvidia build = " $NVIDIA_BUILD_ID
PREC=""
if [ "$precision" = "fp16" ] ; then
PREC="--use_fp16"
elif [ "$precision" = "fp32" ] ; then
PREC=""
elif [ "$precision" = "manual_fp16" ] ; then
PREC="--manual_fp16"
else
echo "Unknown <precision> argument"
exit -2
fi
if [ "$use_xla" = "true" ] ; then
PREC="$PREC --enable_xla"
echo "XLA activated"
fi
mpi=""
if [ $num_gpus -gt 1 ] ; then
mpi="mpiexec --allow-run-as-root -np $num_gpus"
horovod="--use_horovod"
fi
#PHASE 1 Config
train_steps_phase1=$(expr $train_steps \* 9 \/ 10) #Phase 1 is 90% of training
gbs_phase1=$(expr $train_batch_size_phase1 \* $num_accumulation_steps_phase1)
PHASE1_CKPT=${RESULTS_DIR}/phase_1/pretrained/bert_model.ckpt-1
#PHASE 2
seq_len=512
max_pred_per_seq=80
train_steps_phase2=$(expr $train_steps \* 1 \/ 10) #Phase 2 is 10% of training
gbs_phase2=$(expr $train_batch_size_phase2 \* $num_accumulation_steps_phase2)
train_steps_phase2=$(expr $train_steps_phase2 \* $gbs_phase1 \/ $gbs_phase2) # Adjust for batch size
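# Worked example with the default arguments above:
#   train_steps=7820 -> train_steps_phase2 = 7820 * 1 / 10 = 782
#   gbs_phase1 = 64 * 128 = 8192, gbs_phase2 = 8 * 512 = 4096
#   adjusted train_steps_phase2 = 782 * 8192 / 4096 = 1564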
RESULTS_DIR_PHASE2=${RESULTS_DIR}/phase_2
mkdir -m 777 -p $RESULTS_DIR_PHASE2
INPUT_FILES="$DATA_DIR/tfrecord/lower_case_1_seq_len_${seq_len}_max_pred_${max_pred_per_seq}_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/books_wiki_en_corpus/training/*"
EVAL_FILES="$DATA_DIR/tfrecord/lower_case_1_seq_len_${seq_len}_max_pred_${max_pred_per_seq}_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/books_wiki_en_corpus/test"
$mpi python /workspace/bert_tf2/run_pretraining.py \
--input_files=$INPUT_FILES \
--model_dir=$RESULTS_DIR_PHASE2 \
--bert_config_file=$BERT_CONFIG \
--train_batch_size=$train_batch_size_phase2 \
--max_seq_length=$seq_len \
--max_predictions_per_seq=$max_pred_per_seq \
--num_steps_per_epoch=$train_steps --num_train_epochs=1 \
--steps_per_loop=$save_checkpoints_steps \
--save_checkpoint_steps=$save_checkpoints_steps \
--warmup_steps=$warmup_steps_phase2 \
--num_accumulation_steps=$num_accumulation_steps_phase2 \
--learning_rate=$learning_rate_phase2 \
--optimizer_type=LAMB \
$horovod $PREC
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | pipeline_impl | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type tf-keras \
--output-path ${SHARED_DIR}/exported_model.savedmodel \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
\
--checkpoint-dir ${CHECKPOINT_DIR}/checkpoint \
--batch-size ${MAX_BATCH_SIZE} \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--batch-size ${MAX_BATCH_SIZE} \
--data-pattern "${DATASETS_DIR}/outbrain/valid/*.parquet"
""",
)
)
pipeline.model_conversion(
commands=(
r"""
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.savedmodel \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--max-workspace-size 8589934592 \
--atol wide_deep_model=0.015 \
--rtol wide_deep_model=12.0
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 120 \
--verbose \
\
--batching ${MODEL_BATCHING} \
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--max-batch-size ${MAX_BATCH_SIZE} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device ${DEVICE_KIND}=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes ${MEASUREMENT_OFFLINE_BATCH_SIZES} \
--concurrency ${MEASUREMENT_OFFLINE_CONCURRENCY} \
--performance-tool ${PERFORMANCE_TOOL} \
--measurement-request-count 100 \
--evaluation-mode offline \
--warmup \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes ${MEASUREMENT_ONLINE_BATCH_SIZES} \
--concurrency ${MEASUREMENT_ONLINE_CONCURRENCY} \
--performance-tool ${PERFORMANCE_TOOL} \
--measurement-request-count 500 \
--evaluation-mode online \
--warmup \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) |
TensorFlow/Classification/ConvNets/utils | utils | var_storage | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
__all__ = ['model_variable_scope']
def model_variable_scope(name, reuse=False, dtype=tf.float32, *args, **kwargs):
"""Returns a variable scope that the model should be created under.
If `dtype` is a castable type, model variables will be created in fp32
and then cast to `dtype` before being used.
Returns:
A variable scope for the model.
"""
def _custom_dtype_getter(getter, name, shape=None, dtype=None, trainable=True, regularizer=None, *args, **kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
storage_dtype = tf.float32 if dtype in [tf.float32, tf.float16] else dtype
variable = getter(
name,
shape,
dtype=storage_dtype,
trainable=trainable,
regularizer=(
regularizer if
(trainable and not any(l_name.lower() in name.lower()
for l_name in ['batchnorm', 'batch_norm'])) else None
),
*args,
**kwargs
)
if dtype != tf.float32:
cast_name = name + '/fp16_cast'
try:
cast_variable = tf.get_default_graph().get_tensor_by_name(cast_name + ':0')
except KeyError:
cast_variable = tf.cast(variable, dtype, name=cast_name)
cast_variable._ref = variable._ref
variable = cast_variable
return variable
return tf.variable_scope(name, reuse=reuse, dtype=dtype, custom_getter=_custom_dtype_getter, *args, **kwargs)
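# Example usage (a sketch; the scope and variable names are hypothetical):
#
#   with model_variable_scope('resnet50', dtype=tf.float16):
#       kernel = tf.get_variable('conv1/kernel', shape=[7, 7, 3, 64])
#       # `kernel` is stored in fp32; the returned tensor is an fp16 cast of the underlying variable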
|
TensorFlow/Classification | Classification | README | # Image Classification
Image classification is the task of categorizing an image into one of several predefined classes, often also giving a probability that the input belongs to a certain class. This task is crucial in understanding and analyzing images, and it comes quite effortlessly to human beings thanks to our complex visual systems. Most powerful image classification models today are built using some form of Convolutional Neural Networks (CNNs), which are also the backbone of many other tasks in Computer Vision.

[Source](https://github.com/NVlabs/stylegan)
In this overview, we will cover
- Types of Image Classification
- How does it work?
- How is the performance evaluated?
- Use cases and applications
- Where to get started
---
## Types of Image Classification
Image Classification can be broadly divided into either Binary or Multi-class problems depending on the number of categories. Binary image classification problems entail predicting one of two classes. An example of this would be to predict whether an image is that of a dog or not. A subtly different problem is that of single-class (one vs all) classification, where the goal is to recognize data from one class and reject all others. This is beneficial when there is an overabundance of data from one of the classes, also called a class imbalance.

In Multi-class classification problems, models categorize instances into one of three or more categories. Multi-class models often also return confidence scores (or probabilities) of an image belonging to each of the possible classes. This should not be confused with multi-label classification, where a model assigns multiple labels to an instance.
---
## How does it work?
In recent years, Convolutional Neural Networks (CNNs) have led the way to massive breakthroughs in Computer Vision. Most state-of-the-art Image Classification models today employ CNNs in some form. Convolutional Layers are the building blocks of CNNs, and similar to Neural Networks they are composed of neurons that learn parameters like weights and biases. Most CNNs are composed of many Convolutional layers that work like feature extractors, and coupled with Fully Connected (FC) layers they learn to identify patterns in images to return confidence scores in different categories.
But what makes Convolutional Networks special? Well, CNNs are built with the assumption that the input is in the form of images, and by exploiting this fact they can be vastly more efficient than a standard Neural Network for a given level of performance.

Network depth (the number of layers) and the number of learnable parameters have been found to be crucial to performance. Top models can typically have over a hundred layers and hundreds of millions of parameters. Much of recent research in visual recognition has focused on “network engineering”, i.e. designing better architectures, sometimes even employing Machine Learning algorithms to search for them, as in Neural Architecture Search.
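As a rough illustration (a minimal sketch, not one of the models in this Collection; the layer sizes are arbitrary), a CNN classifier stacks convolutional feature extractors and ends with fully connected layers that produce per-class scores:
```python
import tensorflow as tf

def tiny_cnn(num_classes: int = 10) -> tf.keras.Model:
    """A deliberately small CNN: convolutional feature extractor + FC classifier."""
    return tf.keras.Sequential([
        # Convolutional layers act as learnable feature extractors
        tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(32, 32, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation="relu"),
        tf.keras.layers.MaxPooling2D(),
        # Fully connected layers map the extracted features to class scores
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])

model = tiny_cnn()
model.summary()
```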
---
## How is the performance evaluated?
Image Classification performance is often reported as Top-1 or Top-5 scores. In top-1 score, classification is considered correct if the top predicted class (with the highest predicted probability) matches the true class for a given instance. In top-5, we check if one of the top 5 predictions matches the true class. The score is just the number of correct predictions divided by the total number of instances evaluated.
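As a small worked example (the scores below are made up), top-k accuracy can be computed directly from predicted class scores:
```python
import numpy as np

def top_k_accuracy(scores: np.ndarray, labels: np.ndarray, k: int = 5) -> float:
    """scores: [num_samples, num_classes]; labels: [num_samples] true class ids."""
    top_k = np.argsort(scores, axis=1)[:, -k:]          # k highest-scoring classes per sample
    correct = np.any(top_k == labels[:, None], axis=1)  # is the true class among the top k?
    return float(correct.mean())

scores = np.array([[0.1, 0.7, 0.2],   # predicts class 1
                   [0.5, 0.3, 0.2]])  # predicts class 0
labels = np.array([1, 1])
print(top_k_accuracy(scores, labels, k=1))  # 0.5: only the first sample is a top-1 hit
print(top_k_accuracy(scores, labels, k=2))  # 1.0: both true classes are within the top 2
```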
---
## Use cases and applications
### Categorizing Images in Large Visual Databases
Businesses with visual databases may accumulate large amounts of images with missing tags or meta-data. Unless there is an effective way to organize such images, they may not be of much use at all; worse, they may hog precious storage space. Automated image classification algorithms can classify such untagged images into predefined categories, allowing businesses to avoid expensive manual labor.
A related task is that of Image Organization in smart devices like mobile phones. With Image Classification techniques, images and videos can be organized for improved accessibility.
### Visual Search
Visual Search, or image-based search, has risen in popularity in recent years. Many prominent search engines already provide this feature, letting users search for visual content similar to a provided image. This has many applications in the e-commerce and retail industry, where users can snap and upload a photo of a product they are interested in purchasing. This makes the shopping experience much more efficient for customers and can increase sales for businesses.
### Healthcare
Medical Imaging is about creating visual images of internal body parts for clinical purposes. This includes health monitoring, medical diagnosis, treatment, and keeping organized records. Image Classification algorithms can play a crucial role in Medical Imaging by assisting medical professionals in detecting the presence of illness and keeping clinical diagnoses consistent.
---
## Where to get started?
In this Collection, you will find state-of-the-art implementations of Image Classification models and their containers. A good place to get started with Image Classification is with the [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5) model.
ResNets (Residual Networks) are very popular Convolutional Neural Network architectures built with blocks utilizing skip connections to jump over some layers. As the name suggests, ResNet-50 is a variant that is 50 layers deep! But why do we need these “skip” connections? As it turns out, building better CNN architectures is not as simple as stacking more and more layers. In practice, if we just keep adding depth to a CNN, at some point the performance stagnates or may start getting worse. Very deep networks are notoriously difficult to train because of the vanishing gradient problem. In simpler terms, as the depth increases, repeated multiplications during back-propagation may end up making the gradient vanishingly small, which may prevent the weights from changing. In ResNets, the skip connections are meant to act like a “gradient superhighway”, allowing the gradient to flow unrestrained and thus alleviating the vanishing gradient problem. ResNets were very influential in the development of subsequent Convolutional Network architectures, and there is much more to them than the brief summary above! |
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer | maintainer | container | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules | quick_schedules | rpn_R_50_C4_quick | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"
RPN_ONLY: True
RPN:
PRE_NMS_TOP_N_TEST: 12000
POST_NMS_TOP_N_TEST: 2000
DATASETS:
TRAIN: ("coco_2014_minival",)
TEST: ("coco_2014_minival",)
INPUT:
MIN_SIZE_TRAIN: 600
MAX_SIZE_TRAIN: 1000
MIN_SIZE_TEST: 800
MAX_SIZE_TEST: 1000
SOLVER:
BASE_LR: 0.005
WEIGHT_DECAY: 0.0001
STEPS: (1500,)
MAX_ITER: 2000
IMS_PER_BATCH: 4
TEST:
IMS_PER_BATCH: 2
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer | model_analyzer | model_analyzer_config | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the server with CLI.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
e.g. '--model-repository=/models --verbose=True'
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
|
PyTorch/SpeechSynthesis/FastPitch | FastPitch | requirements | inflect
librosa==0.9.0
matplotlib
numpy
pynvml==11.0.0
scipy
tensorboardX==2.0
git+https://github.com/NVIDIA/[email protected]#egg=dllogger
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops | ops | box_utils | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for bounding box processing."""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def jitter_boxes(boxes, noise_scale=0.025):
"""Jitter the box coordinates by some noise distribution.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
noise_scale: a python float which specifies the magnitude of noise. The
rule of thumb is to set this between (0, 0.1]. The default value was found
empirically to best mimic noisy detections.
Returns:
jittered_boxes: a tensor whose shape is the same as `boxes` representing
the jittered boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('jitter_boxes'):
bbox_jitters = tf.random.normal(boxes.get_shape(), stddev=noise_scale)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
width = xmax - xmin
height = ymax - ymin
new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width
new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height
new_width = width * tf.exp(bbox_jitters[..., 2:3])
new_height = height * tf.exp(bbox_jitters[..., 3:4])
jittered_boxes = tf.concat([
new_center_y - new_height * 0.5,
new_center_x - new_width * 0.5,
new_center_y + new_height * 0.5,
new_center_x + new_width * 0.5], axis=-1)
return jittered_boxes
def normalize_boxes(boxes, image_shape):
"""Converts boxes to the normalized coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the normalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('normalize_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1] / height
xmin = boxes[..., 1:2] / width
ymax = boxes[..., 2:3] / height
xmax = boxes[..., 3:4] / width
normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return normalized_boxes
def denormalize_boxes(boxes, image_shape):
"""Converts boxes normalized by [height, width] to pixel coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
denormalized_boxes: a tensor whose shape is the same as `boxes` representing
the denormalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
with tf.name_scope('denormalize_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.split(image_shape, 2, axis=-1)
ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1)
ymin = ymin * height
xmin = xmin * width
ymax = ymax * height
xmax = xmax * width
denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return denormalized_boxes
def clip_boxes(boxes, image_shape):
"""Clips boxes to image boundaries.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('clip_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
clipped_ymin = tf.maximum(tf.minimum(ymin, height - 1.0), 0.0)
clipped_ymax = tf.maximum(tf.minimum(ymax, height - 1.0), 0.0)
clipped_xmin = tf.maximum(tf.minimum(xmin, width - 1.0), 0.0)
clipped_xmax = tf.maximum(tf.minimum(xmax, width - 1.0), 0.0)
clipped_boxes = tf.concat(
[clipped_ymin, clipped_xmin, clipped_ymax, clipped_xmax],
axis=-1)
return clipped_boxes
def compute_outer_boxes(boxes, image_shape, scale=1.0):
"""Compute outer box encloses an object with a margin.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
scale: a float number specifying the scale of output outer boxes to input
`boxes`.
Returns:
outer_boxes: a tensor whose shape is the same as `boxes` representing the
outer boxes.
"""
if scale < 1.0:
raise ValueError(
'scale is {}, but outer box scale must be no less than 1.0.'.format(
scale))
centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0
centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0
box_height = (boxes[..., 2] - boxes[..., 0]) * scale
box_width = (boxes[..., 3] - boxes[..., 1]) * scale
outer_boxes = tf.stack([centers_y - box_height / 2.0,
centers_x - box_width / 2.0,
centers_y + box_height / 2.0,
centers_x + box_width / 2.0], axis=1)
outer_boxes = clip_boxes(outer_boxes, image_shape)
return outer_boxes
def encode_boxes(boxes, anchors, weights=None):
"""Encode boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('encode_boxes'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
box_h = ymax - ymin + 1.0
box_w = xmax - xmin + 1.0
box_yc = ymin + 0.5 * box_h
box_xc = xmin + 0.5 * box_w
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat(
[encoded_dy, encoded_dx, encoded_dh, encoded_dw],
axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
decoded box targets.
"""
if encoded_boxes.shape[-1] != 4:
raise ValueError(
'encoded_boxes.shape[-1] is {:d}, but must be 4.'
.format(encoded_boxes.shape[-1]))
with tf.name_scope('decode_boxes'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.exp(dh) * anchor_h
decoded_boxes_w = tf.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin,
decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1)
return decoded_boxes
def filter_boxes(boxes, scores, image_shape, min_size_threshold):
"""Filter and remove boxes that are too small or fall outside the image.
Args:
boxes: a tensor whose last dimension is 4 representing the
coordinates of boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
image_shape: a tensor whose shape is the same as, or `broadcastable` to
`boxes` except the last dimension, which is 2, representing
[height, width] of the scaled image.
min_size_threshold: a float representing the minimal box size in each
side (w.r.t. the scaled image). Boxes whose sides are smaller than it will
be filtered out.
Returns:
filtered_boxes: a tensor whose shape is the same as `boxes` but with
the position of the filtered boxes are filled with 0.
filtered_scores: a tensor whose shape is the same as 'scores' but with
the position of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0]
width = image_shape[..., 1]
ymin = boxes[..., 0]
xmin = boxes[..., 1]
ymax = boxes[..., 2]
xmax = boxes[..., 3]
h = ymax - ymin + 1.0
w = xmax - xmin + 1.0
yc = ymin + 0.5 * h
xc = xmin + 0.5 * w
min_size = tf.cast(tf.maximum(min_size_threshold, 1.0), dtype=boxes.dtype)
filtered_size_mask = tf.logical_and(
tf.greater(h, min_size), tf.greater(w, min_size))
filtered_center_mask = tf.logical_and(
tf.logical_and(tf.greater(yc, 0.0), tf.less(yc, height)),
tf.logical_and(tf.greater(xc, 0.0), tf.less(xc, width)))
filtered_mask = tf.logical_and(filtered_size_mask, filtered_center_mask)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def filter_boxes_by_scores(boxes, scores, min_score_threshold):
"""Filter and remove boxes whose scores are smaller than the threshold.
Args:
boxes: a tensor whose last dimension is 4 representing the
coordinates of boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
min_score_threshold: a float representing the minimal box score threshold.
Boxes whose score are smaller than it will be filtered out.
Returns:
filtered_boxes: a tensor whose shape is the same as `boxes` but with
the position of the filtered boxes are filled with 0.
filtered_scores: a tensor whose shape is the same as 'scores' but with
the position of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes_by_scores'):
filtered_mask = tf.greater(scores, min_score_threshold)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def top_k_boxes(boxes, scores, k):
"""Sort and select top k boxes according to the scores.
Args:
boxes: a tensor of shape [batch_size, N, 4] representing the coordinates of
the boxes. N is the number of boxes per image.
scores: a tensor of shape [batch_size, N] representing the score of the
boxes.
k: an integer or a tensor indicating the top k number.
Returns:
selected_boxes: a tensor of shape [batch_size, k, 4] representing the
selected top k box coordinates.
selected_scores: a tensor of shape [batch_size, k] representing the selected
top k box scores.
"""
with tf.name_scope('top_k_boxes'):
selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True)
batch_size, _ = scores.get_shape().as_list()
if batch_size == 1:
selected_boxes = tf.squeeze(
tf.gather(boxes, top_k_indices, axis=1), axis=1)
else:
top_k_indices_shape = tf.shape(input=top_k_indices)
batch_indices = (
tf.expand_dims(tf.range(top_k_indices_shape[0]), axis=-1) *
tf.ones([1, top_k_indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack([batch_indices, top_k_indices], axis=-1)
selected_boxes = tf.gather_nd(boxes, gather_nd_indices)
return selected_boxes, selected_scores
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `gt_boxes` may have been padded. The returned `iou` tensor for these
boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.maximum(bb_x_min, tf.transpose(a=gt_x_min, perm=[0, 2, 1]))
i_xmax = tf.minimum(bb_x_max, tf.transpose(a=gt_x_max, perm=[0, 2, 1]))
i_ymin = tf.maximum(bb_y_min, tf.transpose(a=gt_y_min, perm=[0, 2, 1]))
i_ymax = tf.minimum(bb_y_max, tf.transpose(a=gt_y_max, perm=[0, 2, 1]))
i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(a=gt_area, perm=[0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for padded ground truth boxes.
padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
return iou
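# A minimal sketch, not part of the original module: encode a box against an
# anchor and decode it back. The coordinates are made up and the weights follow
# the common (10.0, 10.0, 5.0, 5.0) convention; this is purely illustrative.
if __name__ == '__main__':
  example_boxes = tf.constant([[10.0, 10.0, 50.0, 40.0]])
  example_anchors = tf.constant([[8.0, 12.0, 48.0, 44.0]])
  example_weights = [10.0, 10.0, 5.0, 5.0]
  targets = encode_boxes(example_boxes, example_anchors, example_weights)
  recovered = decode_boxes(targets, example_anchors, example_weights)
  # `recovered` matches `example_boxes` up to floating-point error.
  print(targets.numpy(), recovered.numpy())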
|
PyTorch/Forecasting/TFT/triton/runner/maintainer | maintainer | container | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
|
PyTorch/SpeechRecognition/wav2vec2/utils | utils | download_utils | #!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import requests
import os
import tarfile
import tqdm
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
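# A minimal usage sketch, not part of the original module. The URL, destination
# folder and MD5 hash below are placeholders, not real dataset artifacts.
if __name__ == '__main__':
    url = 'https://example.com/dataset.tar.gz'  # hypothetical
    dest = '/datasets/example'                  # hypothetical
    download_file(url, dest_folder=dest, fname='dataset.tar.gz')
    fpath = os.path.join(dest, 'dataset.tar.gz')
    if not md5_checksum(fpath, target_hash='0123456789abcdef0123456789abcdef'):  # hypothetical hash
        raise ValueError('Checksum mismatch; the download may be corrupted.')
    extract(fpath, dest_folder=dest)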
|
PyTorch/SpeechRecognition/Jasper/platform | platform | DGX1-16GB_Jasper_AMP_8GPU | #!/bin/bash
NUM_GPUS=8 AMP=true BATCH_SIZE=64 GRAD_ACCUMULATION_STEPS=4 bash scripts/train.sh "$@"
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_list_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_list."""
import tensorflow as tf
from object_detection.core import box_list
class BoxListTest(tf.test.TestCase):
"""Tests for BoxList class."""
def test_num_boxes(self):
data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
expected_num_boxes = 3
boxes = box_list.BoxList(data)
with self.test_session() as sess:
num_boxes_output = sess.run(boxes.num_boxes())
self.assertEquals(num_boxes_output, expected_num_boxes)
def test_get_correct_center_coordinates_and_sizes(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
boxes = box_list.BoxList(tf.constant(boxes))
centers_sizes = boxes.get_center_coordinates_and_sizes()
expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]]
with self.test_session() as sess:
centers_sizes_out = sess.run(centers_sizes)
self.assertAllClose(centers_sizes_out, expected_centers_sizes)
def test_create_box_list_with_dynamic_shape(self):
data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1])
data = tf.gather(data, indices)
assert data.get_shape().as_list() == [None, 4]
expected_num_boxes = 2
boxes = box_list.BoxList(data)
with self.test_session() as sess:
num_boxes_output = sess.run(boxes.num_boxes())
self.assertEquals(num_boxes_output, expected_num_boxes)
def test_transpose_coordinates(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
boxes = box_list.BoxList(tf.constant(boxes))
boxes.transpose_coordinates()
expected_corners = [[10.0, 10.0, 15.0, 20.0], [0.1, 0.2, 0.4, 0.5]]
with self.test_session() as sess:
corners_out = sess.run(boxes.get())
self.assertAllClose(corners_out, expected_corners)
def test_box_list_invalid_inputs(self):
data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32)
data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32)
data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data0)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data1)
with self.assertRaises(ValueError):
_ = box_list.BoxList(data2)
def test_num_boxes_static(self):
box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
boxes = box_list.BoxList(tf.constant(box_corners))
self.assertEquals(boxes.num_boxes_static(), 2)
self.assertEquals(type(boxes.num_boxes_static()), int)
def test_num_boxes_static_for_uninferrable_shape(self):
placeholder = tf.placeholder(tf.float32, shape=[None, 4])
boxes = box_list.BoxList(placeholder)
self.assertEquals(boxes.num_boxes_static(), None)
def test_as_tensor_dict(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
tensor_dict = boxlist.as_tensor_dict()
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
expected_classes = [0, 1]
expected_scores = [0.75, 0.2]
with self.test_session() as sess:
tensor_dict_out = sess.run(tensor_dict)
self.assertAllEqual(3, len(tensor_dict_out))
self.assertAllClose(expected_boxes, tensor_dict_out['boxes'])
self.assertAllEqual(expected_classes, tensor_dict_out['classes'])
self.assertAllClose(expected_scores, tensor_dict_out['scores'])
def test_as_tensor_dict_with_features(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
tensor_dict = boxlist.as_tensor_dict(['boxes', 'classes', 'scores'])
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
expected_classes = [0, 1]
expected_scores = [0.75, 0.2]
with self.test_session() as sess:
tensor_dict_out = sess.run(tensor_dict)
self.assertAllEqual(3, len(tensor_dict_out))
self.assertAllClose(expected_boxes, tensor_dict_out['boxes'])
self.assertAllEqual(expected_classes, tensor_dict_out['classes'])
self.assertAllClose(expected_scores, tensor_dict_out['scores'])
def test_as_tensor_dict_missing_field(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
with self.assertRaises(ValueError):
boxlist.as_tensor_dict(['foo', 'bar'])
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Forecasting/TFT/scripts | scripts | benchmark | #! /bin/bash
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUM_GPUS=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
[ $NUM_GPUS -eq 16 ] && WORKER_NUMS=(1 8 16) || WORKER_NUMS=(1 8)
DATASETS=(electricity traffic)
rm -r /tmp/benchmark_results
for DATASET in ${DATASETS[@]}
do
for NGPU in ${WORKER_NUMS[@]}
do
for BATCH_SIZE in 512 1024 1536 2048 2560
do
for USE_AMP in --use_amp ""
do
for AFFINITY in "--affinity disabled" "--affinity single" "--affinity socket_unique_interleaved"
do
EXP_NAME="TFT_benchmark_${DATASET}_BS_${BATCH_SIZE}_${NGPU}GPU${USE_AMP}_${AFFINITY}"
python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \
--dataset ${DATASET} \
--data_path /data/processed/${DATASET}_bin \
--batch_size=${BATCH_SIZE} \
--lr 5e-4 \
--epochs 1 \
--sample 100000 5000 \
--seed 1 \
${USE_AMP} \
${AFFINITY} \
--clip_grad 0.1 \
--results /tmp/benchmark_results/${EXP_NAME}
done
done
done
done
done
for P in `ls /tmp/benchmark_results/`;
do
echo ${P}
tail -n 1 /tmp/benchmark_results/${P}/dllogger.json
done
|
PyTorch/Classification/ConvNets/resnext101-32x4d/training/AMP | AMP | DGX1V_resnext101-32x4d_AMP_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision AMP --mode convergence --platform DGX1V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
PyTorch/Recommendation/DLRM/dlrm/utils/checkpointing | checkpointing | model | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from os.path import join
from typing import Sequence, Any, Dict
import torch
_BOTTOM_MLP_FILE = "bottom_model.mlp.pt"
_TOP_MLP_FILE = "top_model.mlp.pt"
_TOP_OUT_FILE = "top_model.out.pt"
_EMBEDDING_METADATA_FILE = "embeddings.metadata.pt"
_METADATA_FILE = "metadata.pt"
def _get_embedding_file(embedding_index: int) -> str:
return f"bottom_model.embeddings.{embedding_index}.bin"
def _get_embedding_meta_file(embedding_index: int) -> str:
return f"embeddings.{embedding_index}.meta.pt"
class DlrmCheckpointWriter:
"""
Class responsible for saving checkpoints of DLRM model parts.
Depends on `dlrm.nn.embeddings.Embeddings` and `dlrm.nn.mlps.AbstractMlp` interfaces
(for handling multiple model configurations)
"""
def __init__(self, embedding_indices: Sequence[int], config: Dict[str, Any]):
self._embedding_indices = embedding_indices
self._config = config
def save_embeddings(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
for embedding_index, weight in zip(self._embedding_indices, model.bottom_model.embeddings.weights):
self._save_as_bytes(weight.data, join(checkpoint_path, _get_embedding_file(embedding_index)))
torch.save({"shape": weight.shape}, join(checkpoint_path, _get_embedding_meta_file(embedding_index)))
def save_bottom_mlp(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
torch.save(self._mlp_state(model.bottom_model.mlp), join(checkpoint_path, _BOTTOM_MLP_FILE))
def save_top_model(self, checkpoint_path: str, model):
self._ensure_directory(checkpoint_path)
# DistributedDataParallel wraps top_model under "module" attribute
top_model = model.top_model.module if hasattr(model.top_model, 'module') else model.top_model
torch.save(self._mlp_state(top_model.mlp), join(checkpoint_path, _TOP_MLP_FILE))
torch.save(top_model.out.state_dict(), join(checkpoint_path, _TOP_OUT_FILE))
def save_metadata(self, checkpoint_path: str, data: Dict[str, Any]):
self._ensure_directory(checkpoint_path)
torch.save({"data": data, "config": self._config}, join(checkpoint_path, _METADATA_FILE))
def _ensure_directory(self, checkpoint_path: str):
os.makedirs(checkpoint_path, exist_ok=True)
def _mlp_state(self, mlp):
return {
"weights": [x.to(torch.float32) for x in mlp.weights],
"biases": [x.to(torch.float32) for x in mlp.biases]
}
def _save_as_bytes(self, tensor: torch.Tensor, path: str):
with open(path, "wb+") as file:
file.write(tensor.cpu().numpy().astype(np.float32).tobytes())
class DlrmCheckpointLoader:
"""
Class responsible for loading checkpoints of DLRM model parts.
Depends on `dlrm.nn.embeddings.Embeddings` and `dlrm.nn.mlps.AbstractMlp` interfaces
(for handling multiple model configurations)
"""
def __init__(self, embedding_indices: Sequence[int], device: str = "cpu"):
self._embedding_indices = embedding_indices
self._device = device
def load_embeddings(self, checkpoint_path: str, model):
embedding_weights = (self._load_from_bytes(join(checkpoint_path, _get_embedding_file(index)),
self._get_embedding_shape(checkpoint_path, index))
for index in self._embedding_indices)
model.bottom_model.embeddings.load_weights(embedding_weights)
def load_bottom_mlp(self, checkpoint_path: str, model):
bottom_mlp_state = self._load(checkpoint_path, _BOTTOM_MLP_FILE)
model.bottom_model.mlp.load_state(bottom_mlp_state["weights"], bottom_mlp_state["biases"])
def load_top_model(self, checkpoint_path: str, model):
# DistributedDataParallel wraps top_model under "module" attribute
top_model = model.top_model.module if hasattr(model.top_model, 'module') else model.top_model
top_mlp_state = self._load(checkpoint_path, _TOP_MLP_FILE)
top_model.mlp.load_state(top_mlp_state["weights"], top_mlp_state["biases"])
top_out_state = self._load(checkpoint_path, _TOP_OUT_FILE)
top_model.out.load_state_dict(top_out_state)
def _load(self, checkpoint_path: str, state_path: str):
data = torch.load(join(checkpoint_path, state_path), map_location=self._device)
return {self._strip_key(key): value for key, value in data.items()}
def _strip_key(self, key: str):
# DistributedDataParallel wraps top_model under "module" attribute
prefix = "module."
if key.startswith(prefix):
return key[len(prefix):]
return key
def _load_from_bytes(self, path: str, shape) -> torch.Tensor:
with open(path, "rb") as file:
array = np.frombuffer(file.read(), dtype=np.float32).reshape(*shape)
return torch.from_numpy(array).to(self._device)
def _get_embedding_shape(self, checkpoint_path: str, index: int) -> tuple:
embedding_meta = torch.load(join(checkpoint_path, _get_embedding_meta_file(index)))
return embedding_meta["shape"]
|
PyTorch/Detection/SSD/examples | examples | SSD300_FP32_EVAL | # This script evaluates SSD300 model in FP32 using 32 batch size on 1 GPU
# Usage: ./SSD300_FP32_EVAL.sh <path to this repository> <path to dataset> <path to checkpoint> <additional flags>
python $1/main.py --backbone resnet50 --ebs 32 --data $2 --mode evaluation --no-amp --data-layout channels_first --checkpoint $3 ${@:4}
|
TensorFlow/Detection/SSD/models/research/object_detection/box_coders | box_coders | mean_stddev_box_coder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mean stddev box coder.
This box coder uses the following coding scheme to encode boxes:
rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
"""
from object_detection.core import box_coder
from object_detection.core import box_list
class MeanStddevBoxCoder(box_coder.BoxCoder):
"""Mean stddev box coder."""
def __init__(self, stddev=0.01):
"""Constructor for MeanStddevBoxCoder.
Args:
stddev: The standard deviation used to encode and decode boxes.
"""
self._stddev = stddev
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of N anchors.
Returns:
a tensor representing N anchor-encoded boxes
Raises:
ValueError: if the anchors still have deprecated stddev field.
"""
box_corners = boxes.get()
if anchors.has_field('stddev'):
raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
"should not be specified in the box list.")
means = anchors.get()
return (box_corners - means) / self._stddev
def _decode(self, rel_codes, anchors):
"""Decode.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes
Raises:
ValueError: if the anchors still have deprecated stddev field and expects
the decode method to use stddev value from that field.
"""
means = anchors.get()
if anchors.has_field('stddev'):
raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
"should not be specified in the box list.")
box_corners = rel_codes * self._stddev + means
return box_list.BoxList(box_corners)
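# A minimal usage sketch, not part of the original module: encode two boxes
# against matching anchors and decode them back through the encode/decode
# wrappers inherited from box_coder.BoxCoder. Coordinates are made up.
def _mean_stddev_coder_example():
  import tensorflow as tf  # assumed available, as elsewhere in object_detection
  boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5],
                                        [0.1, 0.1, 0.6, 0.6]], tf.float32))
  anchors = box_list.BoxList(tf.constant([[0.0, 0.1, 0.6, 0.6],
                                          [0.1, 0.2, 0.7, 0.7]], tf.float32))
  coder = MeanStddevBoxCoder(stddev=0.1)
  rel_codes = coder.encode(boxes, anchors)
  decoded_boxes = coder.decode(rel_codes, anchors)
  return rel_codes, decoded_boxes.get()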
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP16_inference | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PIPELINE_CONFIG_PATH=${1:-"/workdir/models/research/configs"}"/ssd320_full_1gpus.config"
export TF_ENABLE_AUTO_MIXED_PRECISION=1
SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
OBJECT_DETECTION=$(realpath $SCRIPT_DIR/../object_detection/)
PYTHONPATH=$PYTHONPATH:$OBJECT_DETECTION
python $SCRIPT_DIR/SSD320_inference.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
"${@:2}"
|
PyTorch/SpeechSynthesis/FastPitch/scripts | scripts | inference_example | #!/usr/bin/env bash
export CUDNN_V8_API_ENABLED=1 # Keep the flag for older containers
export TORCH_CUDNN_V8_API_ENABLED=1
: ${DATASET_DIR:="data/LJSpeech-1.1"}
: ${BATCH_SIZE:=32}
: ${FILELIST:="phrases/devset10.tsv"}
: ${AMP:=false}
: ${TORCHSCRIPT:=true}
: ${WARMUP:=0}
: ${REPEATS:=1}
: ${CPU:=false}
: ${PHONE:=true}
: ${CUDNN_BENCHMARK:=false}
# Paths to pre-trained models downloadable from NVIDIA NGC (LJSpeech-1.1)
FASTPITCH_LJ="pretrained_models/fastpitch/nvidia_fastpitch_210824.pt"
HIFIGAN_LJ="pretrained_models/hifigan/hifigan_gen_checkpoint_10000_ft.pt"
WAVEGLOW_LJ="pretrained_models/waveglow/nvidia_waveglow256pyt_fp16.pt"
# Mel-spectrogram generator (optional; can synthesize from ground-truth spectrograms)
: ${FASTPITCH=$FASTPITCH_LJ}
# Vocoder (set only one)
: ${HIFIGAN=$HIFIGAN_LJ}
# : ${WAVEGLOW=$WAVEGLOW_LJ}
[[ "$FASTPITCH" == "$FASTPITCH_LJ" && ! -f "$FASTPITCH" ]] && { echo "Downloading $FASTPITCH from NGC..."; bash scripts/download_models.sh fastpitch; }
[[ "$WAVEGLOW" == "$WAVEGLOW_LJ" && ! -f "$WAVEGLOW" ]] && { echo "Downloading $WAVEGLOW from NGC..."; bash scripts/download_models.sh waveglow; }
[[ "$HIFIGAN" == "$HIFIGAN_LJ" && ! -f "$HIFIGAN" ]] && { echo "Downloading $HIFIGAN from NGC..."; bash scripts/download_models.sh hifigan-finetuned-fastpitch; }
if [[ "$HIFIGAN" == "$HIFIGAN_LJ" && "$FASTPITCH" != "$FASTPITCH_LJ" ]]; then
echo -e "\nNOTE: Using HiFi-GAN checkpoint trained for the LJSpeech-1.1 dataset."
echo -e "NOTE: If you're using a different dataset, consider training a new HiFi-GAN model or switch to WaveGlow."
echo -e "NOTE: See $0 for details.\n"
fi
# Synthesis
: ${SPEAKER:=0}
: ${DENOISING:=0.01}
if [ ! -n "$OUTPUT_DIR" ]; then
OUTPUT_DIR="./output/audio_$(basename ${FILELIST} .tsv)"
[ "$AMP" = true ] && OUTPUT_DIR+="_fp16"
[ "$AMP" = false ] && OUTPUT_DIR+="_fp32"
[ -n "$FASTPITCH" ] && OUTPUT_DIR+="_fastpitch"
[ ! -n "$FASTPITCH" ] && OUTPUT_DIR+="_gt-mel"
[ -n "$WAVEGLOW" ] && OUTPUT_DIR+="_waveglow"
[ -n "$HIFIGAN" ] && OUTPUT_DIR+="_hifigan"
OUTPUT_DIR+="_denoise-"${DENOISING}
fi
: ${LOG_FILE:="$OUTPUT_DIR/nvlog_infer.json"}
mkdir -p "$OUTPUT_DIR"
echo -e "\nAMP=$AMP, batch_size=$BATCH_SIZE\n"
ARGS+=" --cuda"
ARGS+=" --dataset-path $DATASET_DIR"
ARGS+=" -i $FILELIST"
ARGS+=" -o $OUTPUT_DIR"
ARGS+=" --log-file $LOG_FILE"
ARGS+=" --batch-size $BATCH_SIZE"
ARGS+=" --denoising-strength $DENOISING"
ARGS+=" --warmup-steps $WARMUP"
ARGS+=" --repeats $REPEATS"
ARGS+=" --speaker $SPEAKER"
[ "$CPU" = false ] && ARGS+=" --cuda"
[ "$AMP" = true ] && ARGS+=" --amp"
[ "$TORCHSCRIPT" = true ] && ARGS+=" --torchscript"
[ -n "$HIFIGAN" ] && ARGS+=" --hifigan $HIFIGAN"
[ -n "$WAVEGLOW" ] && ARGS+=" --waveglow $WAVEGLOW"
[ -n "$FASTPITCH" ] && ARGS+=" --fastpitch $FASTPITCH"
[ "$PHONE" = true ] && ARGS+=" --p-arpabet 1.0"
[[ "$CUDNN_BENCHMARK" = true && "$CPU" = false ]] && ARGS+=" --cudnn-benchmark"
python inference.py $ARGS "$@"
|
PyTorch/Recommendation/NCF | NCF | load | # Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pandas as pd
RatingData = namedtuple('RatingData',
['items', 'users', 'ratings', 'min_date', 'max_date'])
def describe_ratings(ratings):
info = RatingData(items=len(ratings['item_id'].unique()),
users=len(ratings['user_id'].unique()),
ratings=len(ratings),
min_date=ratings['timestamp'].min(),
max_date=ratings['timestamp'].max())
print("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}"
.format(**(info._asdict())))
return info
def process_movielens(ratings, sort=True):
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
if sort:
ratings.sort_values(by='timestamp', inplace=True)
describe_ratings(ratings)
return ratings
def load_ml_100k(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='\t', names=names)
return process_movielens(ratings, sort=sort)
def load_ml_1m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_10m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_20m(filename, sort=True):
ratings = pd.read_csv(filename)
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
names = {'userId': 'user_id', 'movieId': 'item_id'}
ratings.rename(columns=names, inplace=True)
return process_movielens(ratings, sort=sort)
def load_unknown(filename, sort=True):
names = ['user_id', 'item_id', 'timestamp']
ratings = pd.read_csv(filename, names=names, header=0, engine='python')
ratings['rating'] = 5
return process_movielens(ratings, sort=sort)
DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]
def get_dataset_name(filename):
for dataset in DATASETS:
if dataset in filename.replace('-', '_').lower():
return dataset
print("Unknown dataset. Expecting `user_id`, `item_id` , and `timestamp`")
return "unknown"
def implicit_load(filename, sort=True):
func = globals()["load_" + get_dataset_name(filename)]
return func(filename, sort=sort)
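# A minimal usage sketch, not part of the original module; the ratings path
# below is hypothetical and only illustrates how the loader is dispatched.
if __name__ == '__main__':
    ratings_path = '/data/ml-20m/ratings.csv'  # hypothetical
    print("Detected dataset:", get_dataset_name(ratings_path))
    ratings_df = implicit_load(ratings_path, sort=True)
    print(ratings_df.head())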
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq | fairseq | utils | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from typing import Callable, List
import torch
import torch.nn.functional as F
MANIFOLD_PATH_SEP = "|"
def split_paths(paths: str, separator=os.pathsep) -> List[str]:
return (
paths.split(separator) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
)
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
from .modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
warnings.warn(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def index_put(tensor, indices, value):
tensor[indices] = value
return tensor
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def multiply_grads(optimizer, c):
"""Multiplies grads by a constant *c*."""
for param_group in optimizer.param_groups:
for p in param_group["params"]:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
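# A minimal usage sketch, not part of the original module: a nested sample of
# tensors is moved to the GPU in one call; `apply_to_sample` preserves the
# dict/list structure. The tensor shapes below are arbitrary placeholders.
def _move_sample_example():
    sample = {
        "net_input": {
            "source": torch.zeros(2, 16000),
            "padding_mask": torch.zeros(2, 16000, dtype=torch.bool),
        },
        "target": [torch.tensor([1, 2, 3]), torch.tensor([4, 5])],
    }
    if torch.cuda.is_available():
        sample = move_to_cuda(sample)
    return sample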
|
PyTorch/LanguageModeling/BERT/distillation | distillation | data_augmentation | # coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import sys
import os
import unicodedata
import re
import logging
import csv
import argparse
import copy
import json
import time
import torch
import numpy as np
sys.path.append('/workspace/bert/')
from tokenization import BertTokenizer
from modeling import BertForMaskedLM
from utils.utils import set_seed
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
StopWordsList = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours',
'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be',
'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because',
'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
'there', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've',
'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven',
"haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't",
'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't", "'s", "'re"]
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except (TypeError, NameError):
# unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
# A valid string contains only lowercase ASCII letters (a-z)
def _is_valid(string):
return True if not re.search('[^a-z]', string) else False
def _read_tsv(input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
def prepare_embedding_retrieval(glove_file, vocab_size=100000):
cnt = 0
words = []
embeddings = {}
# only read first 100,000 words for fast retrieval
with open(glove_file, 'r', encoding='utf-8') as fin:
for line in fin:
items = line.strip().split()
words.append(items[0])
embeddings[items[0]] = [float(x) for x in items[1:]]
cnt += 1
if cnt == vocab_size:
break
vocab = {w: idx for idx, w in enumerate(words)}
ids_to_tokens = {idx: w for idx, w in enumerate(words)}
vector_dim = len(embeddings[ids_to_tokens[0]])
emb_matrix = np.zeros((vocab_size, vector_dim))
for word, v in embeddings.items():
if word == '<unk>':
continue
emb_matrix[vocab[word], :] = v
# normalize each word vector
d = (np.sum(emb_matrix ** 2, 1) ** 0.5)
emb_norm = (emb_matrix.T / d).T
return emb_norm, vocab, ids_to_tokens
class DataAugmentor(object):
def __init__(self, model, tokenizer, emb_norm, vocab, ids_to_tokens, M, N, p):
self.model = model
self.tokenizer = tokenizer
self.emb_norm = emb_norm
self.vocab = vocab
self.ids_to_tokens = ids_to_tokens
self.M = M
self.N = N
self.p = p
def _word_distance(self, word):
if word not in self.vocab.keys():
return []
word_idx = self.vocab[word]
word_emb = self.emb_norm[word_idx]
dist = np.dot(self.emb_norm, word_emb.T)
dist[word_idx] = -np.Inf
candidate_ids = np.argsort(-dist)[:self.M]
return [self.ids_to_tokens[idx] for idx in candidate_ids][:self.M]
def _masked_language_model(self, sent, word_pieces, mask_id, ptr):
tokenized_text = self.tokenizer.tokenize(sent)[: 510]
tokenized_text = ['[CLS]'] + tokenized_text
tokenized_len = len(tokenized_text)
tokenized_text = word_pieces + ['[SEP]'] + tokenized_text[1:] + ['[SEP]']
if len(tokenized_text) > 512:
tokenized_text = tokenized_text[:512]
token_ids = self.tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [0] * (tokenized_len + 1) + [1] * (len(tokenized_text) - tokenized_len - 1)
tokens_tensor = torch.tensor([token_ids]).to(device)
segments_tensor = torch.tensor([segments_ids]).to(device)
self.model.to(device)
predictions = self.model(tokens_tensor, segments_tensor)
word_candidates = torch.argsort(predictions[0, mask_id], descending=True)[:self.M].tolist()
word_candidates = self.tokenizer.convert_ids_to_tokens(word_candidates)
return list(filter(lambda x: x.find("##"), word_candidates))
def _word_augment(self, sentence, mask_token_idx, mask_token, ptr):
word_pieces = self.tokenizer.tokenize(sentence)
word_pieces = ['[CLS]'] + word_pieces[: 510]
tokenized_len = len(word_pieces)
token_idx = -1
for i in range(1, tokenized_len):
if "##" not in word_pieces[i]:
token_idx = token_idx + 1
if token_idx < mask_token_idx:
word_piece_ids = []
elif token_idx == mask_token_idx:
word_piece_ids = [i]
else:
break
else:
word_piece_ids.append(i)
print("tobe masked", mask_token)
for junk in word_piece_ids:
print("masking", word_pieces[junk])
if len(word_piece_ids) == 1:
word_pieces[word_piece_ids[0]] = '[MASK]'
candidate_words = self._masked_language_model(
sentence, word_pieces, word_piece_ids[0], ptr)
elif len(word_piece_ids) > 1:
candidate_words = self._word_distance(mask_token)
else:
logger.info("invalid input sentence!")
return None
if len(candidate_words)==0:
candidate_words.append(mask_token)
return candidate_words
def augment(self, sent, blacklist=""):
candidate_sents = [sent]
all_tokens = self.tokenizer.basic_tokenizer.tokenize(sent)
blacklist_tokens = []
if blacklist != "":
blacklist_tokens = self.tokenizer.basic_tokenizer.tokenize(blacklist)
logger.info(blacklist_tokens)
candidate_words = {}
ptr = 0
if len(all_tokens) > 512:
print("GREATER")
# 400 chosen to account for additional tokens created by wordpiece tokenization; leaves headroom under the 512-token limit
while ptr < len(all_tokens):
tokens = all_tokens[ptr: ptr+400]
for (idx, word) in enumerate(tokens):
temp_sent = " ".join(tokens)
if _is_valid(word) and word not in StopWordsList and word not in blacklist_tokens:
augment_temp = self._word_augment(temp_sent, idx, word, ptr)
if augment_temp is not None:
candidate_words[idx] = augment_temp
ptr += 400
logger.info(candidate_words)
tokens = all_tokens
cnt = 0
while cnt < self.N:
new_sent = list(tokens)
for idx in candidate_words.keys():
candidate_word = random.choice(candidate_words[idx])
x = random.random()
if x < self.p:
new_sent[idx] = candidate_word
if " ".join(new_sent) not in candidate_sents:
candidate_sents.append(' '.join(new_sent))
cnt += 1
return candidate_sents
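# A minimal usage sketch, not part of the original module: it assumes a masked
# language model and tokenizer have already been loaded elsewhere, and the
# GloVe path is hypothetical. It expands a single sentence into augmented variants.
def _augment_single_sentence_example(model, tokenizer):
    emb_norm, vocab, ids_to_tokens = prepare_embedding_retrieval("/workspace/glove.6B.300d.txt")  # hypothetical path
    augmentor = DataAugmentor(model, tokenizer, emb_norm, vocab, ids_to_tokens, M=15, N=5, p=0.4)
    # Returns the original sentence followed by up to N augmented variants.
    return augmentor.augment("the movie was surprisingly good")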
class AugmentProcessor(object):
def __init__(self, augmentor, glue_dir, task_name):
self.augmentor = augmentor
self.glue_dir = glue_dir
self.task_name = task_name
self.augment_ids = {'MRPC': [3, 4], 'MNLI': [8, 9], 'CoLA': [3], 'SST-2': [0],
'STS-B': [7, 8], 'QQP': [3, 4], 'QNLI': [1, 2], 'RTE': [1, 2],
'SQuADv1.1': ['context']}
self.filter_flags = { 'MRPC': True, 'MNLI': True, 'CoLA': False, 'SST-2': True,
'STS-B': True, 'QQP': True, 'QNLI': True, 'RTE': True,
}
assert self.task_name in self.augment_ids
def read_augment_write(self):
task_dir = os.path.join(self.glue_dir, self.task_name)
if "SQuADv2.0" in self.task_name:
raise ValueError("Data augmentation not implemented for task: %s" % self.task_name)
if "SQuAD" in self.task_name:
train_samples = json.load(open(os.path.join(self.glue_dir, "train-v1.1.json"), "r", encoding='utf-8'))
output_filename = os.path.join(self.glue_dir, "train-v1.1_aug.json")
train_samples_aug = copy.deepcopy(train_samples)
else:
train_samples = _read_tsv(os.path.join(task_dir, "train.tsv"))
output_filename = os.path.join(task_dir, "train_aug.tsv")
augment_ids_ = self.augment_ids[self.task_name]
if not "SQuAD" in self.task_name:
filter_flag = self.filter_flags[self.task_name]
if "SQuAD" in self.task_name:
for it, entry in enumerate(train_samples["data"]):
entry_aug = copy.deepcopy(entry)
for i, paragraph in enumerate(entry["paragraphs"]):
entry_aug["paragraphs"][i] = copy.deepcopy(paragraph)
#Augment contexts for each paragraph and append
#to existing list of paragraph contexts
print("state", it, i)
if "context" in self.augment_ids[self.task_name]:
all_answers = ""
#Don't augment or rewrite part of context that contains the answer
for qas_id, qa in enumerate(paragraph["qas"]):
answer = qa["answers"][0]
all_answers += " {}".format(answer["text"])
#ignore first since it is the original sample
augmented_paragraph_contexts = self.augmentor.augment(paragraph["context"], all_answers)[1:]
for augmented_paragraph_context in augmented_paragraph_contexts:
good_context = True
entry_aug["paragraphs"][i]["context"] = augmented_paragraph_context
#fix indices of start position.
for qas_id, qa in enumerate(entry_aug["paragraphs"][i]["qas"]):
# Since the context gets tokenized, augmented, and joined on " ", we do the same with the answer
# so that the answer remains a substring of the context
answer_tokens = self.augmentor.tokenizer.basic_tokenizer.tokenize(qa["answers"][0]["text"])
entry_aug["paragraphs"][i]["qas"][qas_id]["answers"][0]["text"] = " ".join(answer_tokens)
#correct answer start based on new context
answer_start_index = augmented_paragraph_context.find(" ".join(answer_tokens))
if answer_start_index == -1:
logger.info("Answer: \"{}\" not found in Context \"{}\"".format(" ".join(answer_tokens), augmented_paragraph_context))
good_context = False
break
else:
entry_aug["paragraphs"][i]["qas"][qas_id]["answers"][0]["answer_start"] = answer_start_index
if good_context:
train_samples_aug["data"][it]["paragraphs"].append(copy.deepcopy(entry_aug["paragraphs"][i]))
#Reset to a copy of the original paragraph so the question augmentation below operates on the un-augmented context
entry_aug["paragraphs"][i] = copy.deepcopy(paragraph)
#Augment questions for each context and append
#to existing list of qas
#Currently augments questions on un-augmented context
#Should it augment questions on augmented context as well?
if "question" in self.augment_ids[self.task_name]:
for qas_id, qa in enumerate(paragraph["qas"]):
#ignore first since it is the original sample
augmented_question_texts = self.augmentor.augment(qa["question"])[1:]
for augmented_question_text in augmented_question_texts:
entry_aug["paragraphs"][i]["qas"][qas_id]["question"] = augmented_question_text
train_samples_aug["data"][it]["paragraphs"][i]["qas"].append(copy.deepcopy(entry_aug["paragraphs"][i]["qas"][qas_id]))
answer = qa["answers"][0]
orig_answer_text = answer["text"]
logger.info("Having been processing {} paragraphs".format(str(i+1)))
logger.info("Having been processing {} documents".format(str(it+1)))
with open(output_filename, 'w', encoding="utf-8") as f:
json.dump(train_samples_aug, f)
else:
with open(output_filename, 'w', newline='', encoding="utf-8") as f:
writer = csv.writer(f, delimiter="\t")
for (i, line) in enumerate(train_samples):
if i == 0 and filter_flag:
writer.writerow(line)
continue
for augment_id in augment_ids_:
sent = line[augment_id]
augmented_sents = self.augmentor.augment(sent)
for augment_sent in augmented_sents:
line[augment_id] = augment_sent
writer.writerow(line)
if (i+1) % 1000 == 0:
logger.info("Having been processing {} examples".format(str(i+1)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained_bert_model", default=None, type=str, required=True,
help="Downloaded pretrained model (bert-base-uncased) is under this folder")
parser.add_argument("--glove_embs", default=None, type=str, required=True,
help="Glove word embeddings file")
parser.add_argument("--glue_dir", default=None, type=str, required=True,
help="GLUE data dir")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="Task(eg. CoLA, SST-2) that we want to do data augmentation for its train set")
parser.add_argument("--N", default=30, type=int,
help="How many times is the corpus expanded?")
parser.add_argument("--M", default=15, type=int,
help="Choose from M most-likely words in the corresponding position")
parser.add_argument("--p", default=0.4, type=float,
help="Threshold probability p to replace current word")
parser.add_argument('--seed', default=42, type=int,
help="random seed for initialization")
args = parser.parse_args()
# logger.info(args)
# Set Seed
n_gpu = torch.cuda.device_count()
set_seed(args.seed, n_gpu)
default_params = {
"CoLA": {"N": 30},
"MNLI": {"N": 10},
"MRPC": {"N": 30},
"SST-2": {"N": 20},
"STS-b": {"N": 30},
"QQP": {"N": 10},
"QNLI": {"N": 20},
"RTE": {"N": 30},
"SQuADv1.1": {"N": 15},
}
if args.task_name in default_params:
args.N = default_params[args.task_name]["N"]
# Prepare data augmentor
tokenizer = BertTokenizer.from_pretrained(args.pretrained_bert_model)
model, config = BertForMaskedLM.from_pretrained(args.pretrained_bert_model)
model.eval()
emb_norm, vocab, ids_to_tokens = prepare_embedding_retrieval(args.glove_embs)
data_augmentor = DataAugmentor(model, tokenizer, emb_norm, vocab, ids_to_tokens, args.M, args.N, args.p)
# Do data augmentation
processor = AugmentProcessor(data_augmentor, args.glue_dir, args.task_name)
processor.read_augment_write()
if __name__ == "__main__":
start = time.time()
main()
print("Total time taken {}".format(time.time() - start))
|
TensorFlow/Segmentation/UNet_3D_Medical/scripts | scripts | unet3d_train_benchmark | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches a 3D-UNet FP32 training benchmark run.
# Usage:
# bash scripts/unet3d_train_benchmark.sh <number/of/gpus> <path/to/dataset> <path/to/results/directory> <batch/size>
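# Example (illustrative values only: 8 GPUs, hypothetical dataset/results paths, batch size 2):
# bash scripts/unet3d_train_benchmark.sh 8 /data/preprocessed /results 2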
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --exec_mode train --max_steps 80 --benchmark --fold 0 --batch_size $4 --xla --augment |
PyTorch/Detection/SSD/examples | examples | SSD300_FP16_8GPU | # This script launches SSD300 training in FP16 on 8 GPUs using 512 batch size (64 per GPU)
# Usage ./SSD300_FP16_8GPU.sh <path to this repository> <path to dataset> <additional flags>
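# Example (illustrative, hypothetical paths; anything after the first two arguments
# is forwarded unchanged to main.py via ${@:3}):
# ./SSD300_FP16_8GPU.sh /workspace/ssd /coco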
torchrun --nproc_per_node=8 $1/main.py --backbone resnet50 --warmup 300 --bs 64 --data $2 ${@:3}
|
PyTorch/SpeechSynthesis/Tacotron2/filelists | filelists | ljs_audio_text_train_subset_300_filelist | LJSpeech-1.1/wavs/LJ040-0100.wav|she would sometimes take Lee with her, apparently leaving him alone in the car while she transacted her business.
LJSpeech-1.1/wavs/LJ011-0248.wav|Howard, strange to say, making no attempt to detain him; probably because Mullay promised to return a few days later, and to bring more money.
LJSpeech-1.1/wavs/LJ016-0442.wav|made a determined effort to burn himself to death by throwing himself bodily on to the fire in the condemned ward.
LJSpeech-1.1/wavs/LJ026-0036.wav|and then a balance must be struck and the doubtful form placed in the kingdom with which it has, on the whole, most points in common.
LJSpeech-1.1/wavs/LJ042-0176.wav|One offers oppression, the other poverty. Both offer imperialistic injustice, tinted with two brands of slavery, end quote.
LJSpeech-1.1/wavs/LJ003-0323.wav|Drunkenness, if it ever occurred, should be visited with severe punishment;
LJSpeech-1.1/wavs/LJ045-0161.wav|He was upset over the fact that I would not answer him.
LJSpeech-1.1/wavs/LJ028-0187.wav|Cyrus decided that Babylon must be taken.
LJSpeech-1.1/wavs/LJ037-0178.wav|or one used Remington-Peters cartridge case, which may have been in the revolver before the shooting,
LJSpeech-1.1/wavs/LJ010-0164.wav|Oxford, who was only nineteen at the time his offense was committed, had been born at Birmingham,
LJSpeech-1.1/wavs/LJ019-0178.wav|and abandoned because of the expense. As to the entire reconstruction of Newgate, nothing had been done as yet.
LJSpeech-1.1/wavs/LJ050-0117.wav|particularly those arising from organized groups, within their special jurisdiction.
LJSpeech-1.1/wavs/LJ033-0128.wav|that the bag Oswald carried contained the assassination weapon and has concluded that Frazier and Randle are mistaken as to the length of the bag.
LJSpeech-1.1/wavs/LJ007-0179.wav|defeats the ends of justice, and disgraces the profession of a Christian country.
LJSpeech-1.1/wavs/LJ033-0067.wav|She pointed to the blanket which was on the floor very close to where Ruth Paine was standing.
LJSpeech-1.1/wavs/LJ004-0139.wav|"In the morning the stench and heat were so oppressive that he and every one else on waking rushed unclothed into the yard;"
LJSpeech-1.1/wavs/LJ009-0208.wav|erected on the cart, about four feet high at the head, and gradually sloping towards the horse, giving a full view of the body,
LJSpeech-1.1/wavs/LJ012-0144.wav|and passed it on to Solomons by his daughter, a widow named Abrahams.
LJSpeech-1.1/wavs/LJ001-0020.wav|the "lower-case" being in fact invented in the early Middle Ages.
LJSpeech-1.1/wavs/LJ014-0227.wav|One of these was Mobbs, who lived in the Minories,
LJSpeech-1.1/wavs/LJ040-0146.wav|He noted that Lee liked to give the impression that he did not care for other people but preferred to keep to himself,
LJSpeech-1.1/wavs/LJ001-0149.wav|From the time when books first took their present shape till the end of the sixteenth century, or indeed later,
LJSpeech-1.1/wavs/LJ002-0143.wav|The commissioners who presided were, quote, little otherwise than self-elected
LJSpeech-1.1/wavs/LJ014-0217.wav|Dwyer managed to overpower his assailant, and got to his feet; but Cannon butted at him with his head, and again threw him to the ground,
LJSpeech-1.1/wavs/LJ005-0250.wav|The prisoners were crowded together in the jail, contrary to the requirements of the four George the fourth
LJSpeech-1.1/wavs/LJ042-0049.wav|I never believed I would find more material advantages at this stage of development in the Soviet Union than I might of had in the U.S.
LJSpeech-1.1/wavs/LJ014-0198.wav|Marley at his trial was undefended, and the sheriffs offered him counsel; but he declined. The witnesses against him all spoke the truth, he said;
LJSpeech-1.1/wavs/LJ034-0093.wav|Brennan also testified that Lee Harvey Oswald,
LJSpeech-1.1/wavs/LJ016-0237.wav|With Calcraft's method there were undoubtedly many failures, and it was a common custom for him to go below the gallows
LJSpeech-1.1/wavs/LJ015-0156.wav|Down at Weybridge, where he had a country place, his name was long remembered with gratitude by the poor.
LJSpeech-1.1/wavs/LJ018-0047.wav|He adhered to this almost to the very last. His case had been warmly espoused by the Society for the Protection of Germans in this country,
LJSpeech-1.1/wavs/LJ013-0020.wav|he acted in a manner which excited the suspicions of the crew.
LJSpeech-1.1/wavs/LJ002-0041.wav|Two other wards were appropriated to the master's side debtors; they were each twenty-three feet by fourteen and a half,
LJSpeech-1.1/wavs/LJ008-0227.wav|slipshod and slovenly, in crushed bonnet and dirty shawl, the gown fastened by a single hook,
LJSpeech-1.1/wavs/LJ007-0029.wav|The condition of the capitally-convicted prisoners after sentence was still very disgraceful. The side they occupied, still known as the press-yard,
LJSpeech-1.1/wavs/LJ018-0358.wav|Christina Edmunds had resort to strychnia, the same lethal drug that Palmer used;
LJSpeech-1.1/wavs/LJ007-0198.wav|The windows were to be glazed and painted to prevent prisoners from looking out;
LJSpeech-1.1/wavs/LJ043-0032.wav|After about a two-week separation, Marina Oswald returned to her husband.
LJSpeech-1.1/wavs/LJ035-0071.wav|At a given signal, they reenacted the event. Baker's movements were timed with a stopwatch.
LJSpeech-1.1/wavs/LJ009-0092.wav|his legs give way, he utters a faint groan, and sinks on the floor.
LJSpeech-1.1/wavs/LJ019-0310.wav|which had long been admitted as indispensable, and had never as yet been properly obtained.
LJSpeech-1.1/wavs/LJ038-0071.wav|When he entered the homicide and robbery bureau office, he saw two detectives standing there with Sgt. Gerald L. Hill,
LJSpeech-1.1/wavs/LJ014-0291.wav|he showed symptoms of delirium tremens, and admitted that he had been addicted to the excessive use of stimulants.
LJSpeech-1.1/wavs/LJ014-0283.wav|The jury found him guilty of the latter only, with a point of law reserved. This was fully argued before three judges,
LJSpeech-1.1/wavs/LJ021-0096.wav|under the able and energetic leadership of General Johnson.
LJSpeech-1.1/wavs/LJ045-0075.wav|She was, quote, sorry that I had not married him (the Russian boyfriend) instead, that it would have been much easier for me, end quote.
LJSpeech-1.1/wavs/LJ022-0203.wav|For that we can be thankful to the God who watches over America.
LJSpeech-1.1/wavs/LJ029-0073.wav|that the President would arrive and depart from Dallas' Love Field; that a motorcade through the downtown area of Dallas to the luncheon site should be arranged;
LJSpeech-1.1/wavs/LJ040-0187.wav|According to Sokolow, this indicated a, quote, present intellectual functioning in the upper range of bright normal intelligence, end quote.
LJSpeech-1.1/wavs/LJ016-0101.wav|One of the three, shamming ill, remained all day in his ward, where he employed himself unraveling the rope from the sleeping-mats.
LJSpeech-1.1/wavs/LJ015-0086.wav|He kept open house at Kilburn Priory;
LJSpeech-1.1/wavs/LJ028-0427.wav|The enormous amount of debris which buried the palaces and temples and walls of Nebuchadnezzar's city, in places to the depth of a hundred feet,
LJSpeech-1.1/wavs/LJ048-0248.wav|President Kennedy was scheduled to speak across the street from his hotel in Fort Worth at eight:thirty a.m.
LJSpeech-1.1/wavs/LJ021-0095.wav|We are now prepared to move into this second phase, on the basis of our experience in the first phase
LJSpeech-1.1/wavs/LJ030-0081.wav|They were instructed to watch particularly for thrown objects, sudden actions in the crowd, and any movements toward the Presidential car.
LJSpeech-1.1/wavs/LJ032-0176.wav|Moreover, the bus transfer which he obtained as he left the bus was still in the pocket when he was arrested.
LJSpeech-1.1/wavs/LJ044-0129.wav|and often it is advisable for some people to remain in the background, not underground, end quote.
LJSpeech-1.1/wavs/LJ018-0177.wav|But as there was no independent corroboration of the informer's evidence, according to the custom of the British law,
LJSpeech-1.1/wavs/LJ049-0113.wav|This point was ably made in the nineteen oh two debate by Senator George F. Hoar, the sponsor of the Senate bill, quote,
LJSpeech-1.1/wavs/LJ050-0141.wav|As a beginning step to improve liaison with local law enforcement officials, the Secret Service on August twenty-six, nineteen sixty-four,
LJSpeech-1.1/wavs/LJ013-0156.wav|a scion of the ducal house of Bedford, by his confidential valet and personal attendant.
LJSpeech-1.1/wavs/LJ032-0222.wav|Moreover, Shaneyfelt testified that in his opinion the photographs were not composites of two different photographs
LJSpeech-1.1/wavs/LJ004-0052.wav|which Howard had eulogized some forty years before.
LJSpeech-1.1/wavs/LJ006-0017.wav|with those who made the selection of the first inspectors, and the two gentlemen appointed were probably the most fitted in England to be so employed.
LJSpeech-1.1/wavs/LJ049-0046.wav|Even so, analysis of the motion picture films taken by amateur photographer Zapruder
LJSpeech-1.1/wavs/LJ017-0124.wav|He frequently declared before and during the trial that it would be impossible to find him guilty.
LJSpeech-1.1/wavs/LJ048-0150.wav|while the Secret Service representatives in Dallas
LJSpeech-1.1/wavs/LJ017-0082.wav|He fixed upon a sporting friend, Mr. John Parsons Cook, who had been in luck at Shrewsbury races, both as a winner and a backer,
LJSpeech-1.1/wavs/LJ041-0095.wav|Oswald read a good deal, said Powers, but, quote, he would never be reading any of the shoot-em-up westerns or anything like that.
LJSpeech-1.1/wavs/LJ002-0089.wav|eight. The female felons were deprived of part of the space which the architect had intended for them.
LJSpeech-1.1/wavs/LJ050-0264.wav|The Commission recommends that the present arrangements
LJSpeech-1.1/wavs/LJ039-0177.wav|was greater than from the second to the third shot and required a movement in the basic firing position of the marksmen.
LJSpeech-1.1/wavs/LJ047-0016.wav|The FBI opened a file on Oswald in October nineteen fifty-nine, when news reports appeared of his defection to the Soviet Union.
LJSpeech-1.1/wavs/LJ028-0036.wav|But in those very early days Babylon was little more than a shrine, surrounded with mud huts and date palms.
LJSpeech-1.1/wavs/LJ013-0173.wav|The researches of the police soon laid bare other suspicious facts.
LJSpeech-1.1/wavs/LJ014-0138.wav|Mrs. Manning became still more violent, shouting, "No, no, I will not stand it! You ought to be ashamed of yourselves!"
LJSpeech-1.1/wavs/LJ028-0165.wav|There is, however, a second inner wall, of less thickness than the first, but very little inferior to it in strength.
LJSpeech-1.1/wavs/LJ006-0048.wav|To these were still added an average of about fifty expecting the last penalty of the law; a certain number of transports awaiting removal to the colonies;
LJSpeech-1.1/wavs/LJ032-0133.wav|Lieutenant Day of the Dallas Police Department had "lifted" a palmprint from the underside of the gun barrel
LJSpeech-1.1/wavs/LJ038-0093.wav|Frequently, however, he was confronted with evidence which he could not explain, and he resorted to statements which are known to be lies.
LJSpeech-1.1/wavs/LJ018-0228.wav|Five or six years later, William Roupell minutely described how he had effected the fraud.
LJSpeech-1.1/wavs/LJ046-0084.wav|for the President soon after the assassination, quote,
LJSpeech-1.1/wavs/LJ033-0109.wav|the Commission has carefully considered the testimony of these two witnesses with regard to the length of the bag.
LJSpeech-1.1/wavs/LJ013-0158.wav|One morning in May his lordship was found dead in his bed with his throat cut.
LJSpeech-1.1/wavs/LJ036-0111.wav|Whaley's memory of the lineup is inaccurate. There were four men altogether, not six men, in the lineup with Oswald.
LJSpeech-1.1/wavs/LJ044-0082.wav|His attempt to express himself through his Fair Play for Cuba activities, however,
LJSpeech-1.1/wavs/LJ036-0208.wav|white male, approximately thirty, slender build, height five foot ten inches, weight one hundred sixty-five pounds, end quote.
LJSpeech-1.1/wavs/LJ038-0255.wav|Firearms identification.
LJSpeech-1.1/wavs/LJ031-0111.wav|The elliptical wound in the Governor's back, located slightly to the left of the Governor's right armpit approximately five-eighths inch (a centimeter and a half)
LJSpeech-1.1/wavs/LJ006-0246.wav|On another occasion a young man, who was being violently teased, seized a knife and stabbed his tormentor in the back.
LJSpeech-1.1/wavs/LJ027-0167.wav|Then the gills gradually dry up, as the lungs develop, and they now breathe wholly by lungs, but still retain the tail.
LJSpeech-1.1/wavs/LJ033-0187.wav|However, the complete identity of characteristics between the paper and tape in the bag found on the sixth floor
LJSpeech-1.1/wavs/LJ009-0284.wav|It was stated in evidence before the Commission on Capital Punishment in eighteen sixty-four,
LJSpeech-1.1/wavs/LJ009-0249.wav|When Charles White was executed in eighteen twenty-three for arson, he arranged a handkerchief
LJSpeech-1.1/wavs/LJ015-0149.wav|peas at ten shillings a quart, five-guinea pines, and early asparagus were to be found on his table.
LJSpeech-1.1/wavs/LJ019-0330.wav|Dietaries were drawn up for adoption on the recommendation of a committee of experts.
LJSpeech-1.1/wavs/LJ012-0118.wav|It was a large gold brooch set in pearls, but a portion of the mounting had melted with the heat.
LJSpeech-1.1/wavs/LJ008-0071.wav|In the few years which elapsed between the establishment of the gallows at Newgate
LJSpeech-1.1/wavs/LJ015-0253.wav|he handed over to Pierce a sum of three thousand pounds, his own, whether rightly or wrongly acquired never came out,
LJSpeech-1.1/wavs/LJ045-0102.wav|things apparently went quite smoothly from the time Oswald returned from Mexico until the weekend of November sixteen to seventeen, nineteen sixty-three.
LJSpeech-1.1/wavs/LJ009-0256.wav|Still he resisted.
LJSpeech-1.1/wavs/LJ050-0055.wav|that the PRS files can no longer be limited largely to persons communicating actual threats to the President.
LJSpeech-1.1/wavs/LJ034-0037.wav|Someone sitting on the box facing the window would have his palm in this position if he placed his hand alongside his right hip.
LJSpeech-1.1/wavs/LJ020-0081.wav|and knead for ten minutes, carefully at first, lest the liquids should be wasted, and more boldly when they are absorbed by the paste.
LJSpeech-1.1/wavs/LJ009-0077.wav|The ordinary of Newgate is an orthodox, unaffected, Church of England divine,
LJSpeech-1.1/wavs/LJ008-0107.wav|in his canonicals, and with his head as stiffly erect as a sheriff's coachman.
LJSpeech-1.1/wavs/LJ043-0013.wav|Part of the problem resulted from the fact that, as Jeanne De Mohrenschildt testified,
LJSpeech-1.1/wavs/LJ037-0225.wav|five foot eight inches, black hair, slender, wearing a white jacket, white shirt and dark slacks, end quote,
LJSpeech-1.1/wavs/LJ012-0294.wav|without hesitation brought in a verdict of willful murder.
LJSpeech-1.1/wavs/LJ042-0192.wav|are preferred rather than loud and useless manifestations of protest, end quote, Oswald went on to note, quote,
LJSpeech-1.1/wavs/LJ016-0078.wav|but had to come down again covered with soot and filth just as the officers entered the ward.
LJSpeech-1.1/wavs/LJ028-0174.wav|Other ancient descriptions of the walls have been left us by Ctesias of the fifth century B.C., and by Strabo of the beginning of the Christian era,
LJSpeech-1.1/wavs/LJ019-0002.wav|The time at length approached when a radical and complete change was to come over the old city jail.
LJSpeech-1.1/wavs/LJ032-0271.wav|(two) Oswald's palmprint was on the rifle in a position which shows that he had handled it while it was disassembled,
LJSpeech-1.1/wavs/LJ018-0325.wav|But extra precautions and close supervision have so far proved effectual, and the prisoners are still in custody after a lapse of ten years.
LJSpeech-1.1/wavs/LJ048-0259.wav|However, Chief Rowley did not condone the action of the off-duty agents, particularly since it violated a regulation of the Secret Service,
LJSpeech-1.1/wavs/LJ009-0099.wav|Meanwhile the clergyman, still bent into the form of a sleeping dog,
LJSpeech-1.1/wavs/LJ034-0180.wav|The man was dressed in a light-colored, open-neck shirt which could have been either a sports shirt or a T-shirt,
LJSpeech-1.1/wavs/LJ024-0057.wav|Why then should we leave the fulfillment of this public policy to chance
LJSpeech-1.1/wavs/LJ018-0260.wav|Mr. Justice Byles, in passing sentence, commented severely upon the commission of such crimes by a man in Roupell's position in life,
LJSpeech-1.1/wavs/LJ007-0095.wav|Prisoners indeed were known to boast that they had saved their necks by feigning insanity.
LJSpeech-1.1/wavs/LJ005-0117.wav|Numbers of the jails were still unprovided with chaplains, and the prisoners never heard Divine service.
LJSpeech-1.1/wavs/LJ006-0168.wav|to taking the descriptions of newly-arrived prisoners.
LJSpeech-1.1/wavs/LJ011-0117.wav|devoted its efforts first to a mitigation of the forgery statute, but could not immediately accomplish much.
LJSpeech-1.1/wavs/LJ007-0223.wav|The prison officials appear to be on the side of the inspectors, to the great dissatisfaction of the corporation, who claimed the full allegiance and support of its servants.
LJSpeech-1.1/wavs/LJ009-0176.wav|Seven other crimes, however, were still capital by law, and so continued till the passing of the Criminal Consolidation Acts of eighteen sixty-one.
LJSpeech-1.1/wavs/LJ034-0119.wav|Approximately seven or eight minutes later
LJSpeech-1.1/wavs/LJ014-0226.wav|Only a few have vied with Cannon in fiendish cruelty and brutality.
LJSpeech-1.1/wavs/LJ045-0074.wav|In the letter Marina Oswald stated that her husband had changed a great deal and that she was very lonely in the United States.
LJSpeech-1.1/wavs/LJ012-0044.wav|When his trade was busiest he set up a second establishment, at the head of which, although he was married,
LJSpeech-1.1/wavs/LJ027-0012.wav|All have the same ultimate substance
LJSpeech-1.1/wavs/LJ028-0254.wav|The people, enjoying the greater freedom which Cyrus permitted them, were contented, and life in Babylon went on about as before.
LJSpeech-1.1/wavs/LJ002-0326.wav|The poor debtors were not supplied with beds. Those who could pay the price might hire them from each other,
LJSpeech-1.1/wavs/LJ014-0259.wav|Watts led two lives.
LJSpeech-1.1/wavs/LJ035-0067.wav|from the sixth floor by the time Baker and Truly arrived, Commission counsel asked Baker and Truly to repeat their movements from the time of the shot
LJSpeech-1.1/wavs/LJ010-0146.wav|Attacks upon the sovereign, as I have said, became more common after the accession of the young Queen Victoria in eighteen thirty-eight.
LJSpeech-1.1/wavs/LJ007-0084.wav|The inspectors in the following year, on examining the facts, found that some of these poor creatures had been in confinement for long periods:
LJSpeech-1.1/wavs/LJ049-0204.wav|While in accordance with its mandate
LJSpeech-1.1/wavs/LJ011-0035.wav|Every endeavor was used, however, to obtain a commutation of sentence. His case was twice argued before the judges on points of law,
LJSpeech-1.1/wavs/LJ021-0001.wav|The Fireside Chats of Franklin Delano Roosevelt, by Franklin D Roosevelt, Section six.
LJSpeech-1.1/wavs/LJ008-0148.wav|One night he was missing
LJSpeech-1.1/wavs/LJ011-0237.wav|The jewelers were always a favorite prey of the London thieves.
LJSpeech-1.1/wavs/LJ017-0272.wav|"Ah!" he remarked, "they will have to wait for us then till eight."
LJSpeech-1.1/wavs/LJ049-0067.wav|the radio net in use in motorcades is elaborate and permits a number of different means of communication with various local points.
LJSpeech-1.1/wavs/LJ032-0171.wav|and that this was the same shirt which Oswald wore on the morning of the assassination.
LJSpeech-1.1/wavs/LJ048-0132.wav|which would bring to bear the judgment and experience of members of the White House detail other than the advance agent.
LJSpeech-1.1/wavs/LJ006-0025.wav|France had sent Misseurs Beaumont and De Tocqueville, who subsequently published several interesting works on the subject.
LJSpeech-1.1/wavs/LJ043-0176.wav|If the attack had succeeded and Oswald had been caught, the pictures showing him with his rifle
LJSpeech-1.1/wavs/LJ044-0191.wav|Now there appeared to be no chance to get to Cuba, where he had thought he might find his communist ideal. The U.S. Government would not permit travel there
LJSpeech-1.1/wavs/LJ038-0011.wav|A police car made a U-turn, and as the sirens grew fainter,
LJSpeech-1.1/wavs/LJ002-0244.wav|but its business was much reduced by the extension of the Courts of Conscience.
LJSpeech-1.1/wavs/LJ031-0209.wav|X-rays and photographs were taken preliminarily and the pathological examination began at about eight p.m.
LJSpeech-1.1/wavs/LJ042-0032.wav|and of his initial commitment to that country can best be understood, however, in the context
LJSpeech-1.1/wavs/LJ009-0132.wav|Although this misapplication of religious services still went on,
LJSpeech-1.1/wavs/LJ034-0048.wav|The freshness of prints developed in this manner cannot be estimated,
LJSpeech-1.1/wavs/LJ043-0023.wav|and helped to move the personal effects of Marina Oswald and the baby.
LJSpeech-1.1/wavs/LJ015-0216.wav|This was an important step, and they might easily be robbed some day when Burgess was the guard, provided only that they could be opened.
LJSpeech-1.1/wavs/LJ006-0180.wav|the interior of the jail was more like a bear-garden or the noisy purlieus of a public-house than a prison.
LJSpeech-1.1/wavs/LJ016-0342.wav|The first private execution under the new law took place within the precincts of Maidstone Jail.
LJSpeech-1.1/wavs/LJ025-0170.wav|for it is only the green parts of the plant which, under the influence of sunlight, have the marvelous power of decomposing carbonic acid,
LJSpeech-1.1/wavs/LJ047-0076.wav|In New Orleans. In the middle of May of nineteen sixty-three, Agent Hosty checked Oswald's last known residence and found that he had moved.
LJSpeech-1.1/wavs/LJ005-0011.wav|were first made use of about eighteen twenty-seven. That the need for prison reform was imperative may be gathered from the few out of many instances I have adduced,
LJSpeech-1.1/wavs/LJ033-0142.wav|because the cartons stacked around the southeast corner would shield him.
LJSpeech-1.1/wavs/LJ018-0005.wav|the public mind was greatly agitated by the affair for several months. The story of the murder must be pretty familiar to most of my readers.
LJSpeech-1.1/wavs/LJ049-0183.wav|regarding such threats and that its Protective Research Section is not adequately staffed or equipped
LJSpeech-1.1/wavs/LJ036-0031.wav|and requested a transfer which she might use if she got through the traffic.
LJSpeech-1.1/wavs/LJ011-0285.wav|The door of his place of durance stood open, and Mr. Gee began to consider whether he might not escape.
LJSpeech-1.1/wavs/LJ041-0114.wav|three months prior to his regularly scheduled separation date, ostensibly to care for his mother who had been injured in an accident at her work.
LJSpeech-1.1/wavs/LJ012-0134.wav|Presently the proper person arrived from the consignees, but found the gold-dust gone.
LJSpeech-1.1/wavs/LJ011-0005.wav|A lady in the country, who had thirteen thousand pounds in the stocks, desired her London agent to sell them out.
LJSpeech-1.1/wavs/LJ028-0087.wav|Such was the appearance of the builder of the walls of Babylon.
LJSpeech-1.1/wavs/LJ016-0329.wav|a bill was introduced by Mr. Hibbert, M.P., and accepted by the Government, providing for the future carrying out of executions within prisons.
LJSpeech-1.1/wavs/LJ034-0017.wav|could look southwesterly down Elm Street over the top of the "Rolling Readers" cartons.
LJSpeech-1.1/wavs/LJ044-0086.wav|executive director of the Information Council of the Americas, who also appeared on the program.
LJSpeech-1.1/wavs/LJ038-0100.wav|On November twenty-three, Fritz confronted Oswald with the evidence that he had purchased a rifle under the fictitious name of "Hidell."
LJSpeech-1.1/wavs/LJ049-0019.wav|The last Presidential vehicle with any protection against small-arms fire left the White House in nineteen fifty-three.
LJSpeech-1.1/wavs/LJ021-0125.wav|it was natural that the workers should seek and obtain a statutory declaration of their constitutional right
LJSpeech-1.1/wavs/LJ019-0294.wav|The prison buildings were in many places out of repair; other houses often overlooked them.
LJSpeech-1.1/wavs/LJ009-0211.wav|and on the right the ripping chisel, with which the murders had been committed, were exposed to view.
LJSpeech-1.1/wavs/LJ044-0172.wav|and left for Irving with Marina Oswald and June and most of the Oswalds' effects three days later.
LJSpeech-1.1/wavs/LJ047-0129.wav|FBI informants in the New Orleans area, familiar with pro-Castro or Communist Party activity there,
LJSpeech-1.1/wavs/LJ024-0139.wav|has been tipped out of balance by the courts in direct contradiction of the high purposes of the framers of the Constitution.
LJSpeech-1.1/wavs/LJ005-0106.wav|Jails, of which the old prison at Reading was a specimen, were still left intact.
LJSpeech-1.1/wavs/LJ042-0247.wav|In August of nineteen sixty-three, he gave the New Orleans police as a reason for refusing to permit his family to learn English,
LJSpeech-1.1/wavs/LJ047-0092.wav|On August nine, nineteen sixty-three,
LJSpeech-1.1/wavs/LJ026-0166.wav|back to starch usable as food and the comparison of the green plant and the animal would be complete.
LJSpeech-1.1/wavs/LJ033-0019.wav|According to the testimony of Frazier, Marina Oswald, and Ruth Paine, it appears that Oswald never returned to Irving in midweek
LJSpeech-1.1/wavs/LJ042-0172.wav|must have as its nucleus the traditional ideological best of both systems, and yet be utterly opposed to both systems.
LJSpeech-1.1/wavs/LJ027-0018.wav|All are forced to make concession after concession to their surroundings, and in these concessions all progress in life consists.
LJSpeech-1.1/wavs/LJ041-0187.wav|and he wanted to be on the winning side so that ten thousand years from-now people would look in the history books and say, "Well, this man was ahead of his time."
LJSpeech-1.1/wavs/LJ048-0286.wav|Nor is this goal served when agents remain out until early morning hours, and lose the opportunity to get a reasonable amount of sleep.
LJSpeech-1.1/wavs/LJ018-0037.wav|In searching the prisoner's box, Mr. Briggs' watch was found wrapped up in a piece of leather,
LJSpeech-1.1/wavs/LJ009-0044.wav|His features have no felonious cast;
LJSpeech-1.1/wavs/LJ045-0100.wav|She thought that he might not have become involved in the assassination if people had been kinder to him.
LJSpeech-1.1/wavs/LJ035-0149.wav|She ran inside and up the front stairs into the large open office reserved for clerical employees.
LJSpeech-1.1/wavs/LJ028-0188.wav|In five thirty-eight the city fell, and for a time it became the home of the Persian King.
LJSpeech-1.1/wavs/LJ003-0320.wav|which recommended restrictions upon the number of visitors admitted.
LJSpeech-1.1/wavs/LJ013-0241.wav|The policeman insisted on searching the premises, at which Good displayed some uneasiness.
LJSpeech-1.1/wavs/LJ018-0194.wav|Cummings was repeatedly "run in" for the offense of coining and uttering bad money, whether coin or notes.
LJSpeech-1.1/wavs/LJ046-0135.wav|PRS received items in eight thousand, seven hundred nine cases.
LJSpeech-1.1/wavs/LJ046-0143.wav|These instructions to PRS personnel appear to be the only instance where an effort was made to reduce the criteria to writing.
LJSpeech-1.1/wavs/LJ048-0103.wav|and with the concurrence of the Dallas police, was entirely appropriate, in view of the known desires of the President.
LJSpeech-1.1/wavs/LJ038-0279.wav|I think is going overboard in the other direction.
LJSpeech-1.1/wavs/LJ044-0117.wav|that there were people who understood his activity, end quote.
LJSpeech-1.1/wavs/LJ028-0485.wav|The outer and inner defenses of Babylon were so strong and so high that no enemy could hope to take them,
LJSpeech-1.1/wavs/LJ031-0174.wav|After the President was pronounced dead,
LJSpeech-1.1/wavs/LJ026-0020.wav|If chlorophyll is present, the carbon dioxide of the air serves as a source of carbon,
LJSpeech-1.1/wavs/LJ027-0136.wav|Illustrations quoted from the works of Romanes and Le Conte will make this principle clear.
LJSpeech-1.1/wavs/LJ002-0113.wav|in an age when insolvent acts and bankruptcy courts do so much to relieve the impecunious,
LJSpeech-1.1/wavs/LJ004-0113.wav|It was further ordered that male prisoners should be kept perfectly distinct from the females.
LJSpeech-1.1/wavs/LJ044-0115.wav|he felt that this was a great man that he had received the letter from, end quote.
LJSpeech-1.1/wavs/LJ039-0012.wav|The Commission first learned of this incident when Robert Oswald related it to FBI agents on February nineteen, nineteen sixty-four,
LJSpeech-1.1/wavs/LJ014-0164.wav|as the wickedness and levity of the immense crowd collected at the execution this morning could be imagined by no man,
LJSpeech-1.1/wavs/LJ050-0018.wav|and to keep the Secretary fully informed regarding all significant developments relating to Presidential protection.
LJSpeech-1.1/wavs/LJ012-0131.wav|The letter informed him of the marks and sizes of the cases containing the precious metal,
LJSpeech-1.1/wavs/LJ016-0308.wav|yet the witnesses were not unanimous.
LJSpeech-1.1/wavs/LJ028-0332.wav|Once more, however, he waited till the interval appointed had gone by, and then leading the troops to the place where the four thousand were,
LJSpeech-1.1/wavs/LJ006-0251.wav|but the presence and authority of the governor himself became indispensable.
LJSpeech-1.1/wavs/LJ006-0016.wav|These considerations no doubt had weight
LJSpeech-1.1/wavs/LJ031-0093.wav|Answer: No, sir. Before -- well, in trying to treat an acutely injured patient, you have to establish an airway, adequate ventilation
LJSpeech-1.1/wavs/LJ042-0163.wav|After, however, two years and a lot of growing up, I decided to return to the USA.
LJSpeech-1.1/wavs/LJ031-0220.wav|During the autopsy examination, Federal agents brought the surgeons three pieces of bone recovered from Elm Street and the Presidential automobile.
LJSpeech-1.1/wavs/LJ030-0050.wav|The Presidential limousine.
LJSpeech-1.1/wavs/LJ012-0010.wav|both having been recognized by the clergyman who had performed the ceremony, and the assault had been committed to secure the money
LJSpeech-1.1/wavs/LJ004-0213.wav|Compared with those highly meritorious institutions Newgate still showed but badly.
LJSpeech-1.1/wavs/LJ010-0061.wav|That some thirty or more needy men should hope to revolutionize England is a sufficient proof of the absurdity of their attempt.
LJSpeech-1.1/wavs/LJ022-0195.wav|But it is more than the recovery of the material basis of our individual lives.
LJSpeech-1.1/wavs/LJ039-0102.wav|After familiarization with live ammunition in the twenty-two rifle and the twenty-two pistol,
LJSpeech-1.1/wavs/LJ020-0073.wav|Sift the flour, salt and sugar into a bowl,
LJSpeech-1.1/wavs/LJ040-0038.wav|Such ideas of grandeur were apparently accompanied by notions of oppression.
LJSpeech-1.1/wavs/LJ019-0049.wav|the principles of which were debated by disputants of widely opposite opinions with an earnestness that sometimes bordered upon acrimony.
LJSpeech-1.1/wavs/LJ050-0012.wav|through an Assistant Secretary whose duties also include the direct supervision of the Bureau of the Mint
LJSpeech-1.1/wavs/LJ007-0117.wav|where the upper ward was exclusively appropriated to their use. They also had their meals sent in, and, with the food, wine almost ad libitum.
LJSpeech-1.1/wavs/LJ004-0169.wav|On the dirty bedstead lay a wretched being in the throes of severe illness.
LJSpeech-1.1/wavs/LJ019-0127.wav|or the still more costly process of walling in the whole farm, would have greatly added to the charges of these establishments.
LJSpeech-1.1/wavs/LJ014-0141.wav|and stretching out her hand, she gathered up a quantity of the rue which, following ancient custom dating from the days of the jail fever,
LJSpeech-1.1/wavs/LJ037-0041.wav|The man appeared to step back as the policeman, quote, calmly opened the car door, end quote, and very slowly got out and walked toward the front of the car.
LJSpeech-1.1/wavs/LJ012-0023.wav|He was taken up when still in his teens for stealing a pocketbook, and was sentenced to transportation, but did not get beyond the hulks at Chatham.
LJSpeech-1.1/wavs/LJ032-0115.wav|A few minutes after the rifle was discovered on the sixth floor of the Depository Building
LJSpeech-1.1/wavs/LJ047-0007.wav|It had interviewed him twice shortly after his return to the United States, again a year later at his request
LJSpeech-1.1/wavs/LJ006-0049.wav|an occasional prisoner or two committed by the Houses of Parliament, the Courts of King's Bench, Common Pleas,
LJSpeech-1.1/wavs/LJ028-0065.wav|Eleven years later, in five eighty-six, he destroyed the sacred Hebrew city,
LJSpeech-1.1/wavs/LJ049-0076.wav|The Commission's review of the provisions for Presidential protection at the time of President Kennedy's trip to Dallas demonstrates the need for substantial improvements.
LJSpeech-1.1/wavs/LJ003-0091.wav|Constantly associated with these convicted felons were numbers of juveniles, infants of tender years.
LJSpeech-1.1/wavs/LJ050-0030.wav|The Commission also recommends
LJSpeech-1.1/wavs/LJ013-0122.wav|Stealing plate was about this period the crime of a more aristocratic thief.
LJSpeech-1.1/wavs/LJ046-0013.wav|Prompted by these dismaying statistics, the Commission has inquired into the problems and methods of Presidential protection in effect
LJSpeech-1.1/wavs/LJ035-0134.wav|that they were watching the parade from the top step of the building entrance when Gloria Calverly, who works in the Depository Building,
LJSpeech-1.1/wavs/LJ016-0232.wav|and he owned a pet pony which would follow him about like a dog.
LJSpeech-1.1/wavs/LJ020-0023.wav|If too stiff, warm water, a spoonful at a time until you can handle the paste easily. The danger is in getting it too stiff. Now.
LJSpeech-1.1/wavs/LJ005-0046.wav|The good it tried to do took active shape in the establishment of temporary refuges -- at Hoxton for males, and in the Hackney Road for females
LJSpeech-1.1/wavs/LJ010-0019.wav|As time passed,
LJSpeech-1.1/wavs/LJ049-0130.wav|The Secret Service must rely in large part
LJSpeech-1.1/wavs/LJ024-0023.wav|ever since a similar proposal passed the House of Representatives in eighteen sixty-nine.
LJSpeech-1.1/wavs/LJ018-0315.wav|to whom it was said one hundred pounds apiece had been given down as the price of their infidelity.
LJSpeech-1.1/wavs/LJ029-0037.wav|Advance preparations for President Kennedy's visit to Dallas were primarily the responsibility of two Secret Service agents:
LJSpeech-1.1/wavs/LJ049-0218.wav|between the Secret Service and the President and his family is contemplated.
LJSpeech-1.1/wavs/LJ003-0155.wav|Tailoring and shoemaking was permitted, but it was deemed unsafe to allow a carpenter or blacksmith to have his tools.
LJSpeech-1.1/wavs/LJ013-0113.wav|Robberies as daring in conception as they were boldly executed were common enough.
LJSpeech-1.1/wavs/LJ045-0047.wav|and I told him that
LJSpeech-1.1/wavs/LJ006-0065.wav|were associated together, "of every variety of age, habit, and delinquency, without employment, oversight, or control."
LJSpeech-1.1/wavs/LJ003-0316.wav|It should be peremptorily forbidden to the keeper or any officer to make a pecuniary profit out of the supplies of food, fuel, or other necessaries.
LJSpeech-1.1/wavs/LJ021-0004.wav|Tonight I continue that report, though, because of the shortness of time, I must defer a number of subjects to a later date.
LJSpeech-1.1/wavs/LJ031-0022.wav|Charles R. Baxter, Robert N. McClelland, Ronald C. Jones; the chief neurologist, Dr. William Kemp Clark;
LJSpeech-1.1/wavs/LJ007-0030.wav|consisted of two dozen rooms and fifteen cells. In these various chambers, until just before the inspectors made their report,
LJSpeech-1.1/wavs/LJ021-0137.wav|Step by step we have created all the government agencies necessary to insure, as a general rule, industrial peace,
LJSpeech-1.1/wavs/LJ033-0081.wav|she looked out the breakfast-room window and saw Oswald cross the street and walk toward the driveway where her brother parked his car near the carport.
LJSpeech-1.1/wavs/LJ003-0218.wav|The chapel was filled with a curious but callous congregation, who came to stare at the miserable people thus publicly exposed.
LJSpeech-1.1/wavs/LJ028-0317.wav|Introduced into their assembly, he began to bewail his misfortunes, telling them that
LJSpeech-1.1/wavs/LJ047-0014.wav|the Office of Naval Intelligence, the FBI and the CIA. The information known to the FBI is summarized below.
LJSpeech-1.1/wavs/LJ002-0067.wav|but really kept for the few who had funds sufficient to gain them admission to these more comfortable quarters.
LJSpeech-1.1/wavs/LJ003-0101.wav|must have had a tendency to turn them into the world hardened and accomplished in the ways of vice and crime. End quote.
LJSpeech-1.1/wavs/LJ036-0048.wav|She boarded the Marsalis bus at St. Paul and Elm Streets to return home. She testified further, quote,
LJSpeech-1.1/wavs/LJ022-0129.wav|in making this the most efficient and the cleanest example of public enterprise the world has ever seen.
LJSpeech-1.1/wavs/LJ038-0121.wav|or to answer any questions concerning the card.
LJSpeech-1.1/wavs/LJ031-0095.wav|Before this was accomplished the President's cardiac activity had ceased and closed cardiac massage was instituted, which made it impossible to inspect his back.
LJSpeech-1.1/wavs/LJ007-0131.wav|Enough has probably been extracted from this most damnatory report to give a complete picture of the disgraceful state in which Newgate still remained in eighteen thirty-five.
LJSpeech-1.1/wavs/LJ001-0067.wav|In the Low Countries and Cologne, which were very fertile of printed books, Gothic was the favorite.
LJSpeech-1.1/wavs/LJ011-0061.wav|Let this monster give his name; I am ready to fight him. I am still determined to put myself in the place of Mr. Fauntleroy.
LJSpeech-1.1/wavs/LJ019-0381.wav|in another there was half-heartedness, even apathy and an almost complete contempt for the provisions of the act.
LJSpeech-1.1/wavs/LJ012-0170.wav|According to his statement, when sentenced to death, he had been driven to horse-stealing by the execration which had pursued him after the murder.
LJSpeech-1.1/wavs/LJ005-0090.wav|the first by daily services, the latter by the appointment of schoolmasters and instruction in reading and writing.
LJSpeech-1.1/wavs/LJ049-0127.wav|agencies other than the Secret Service have become involved in phases of the overall problem of protecting our national leaders.
LJSpeech-1.1/wavs/LJ004-0100.wav|An infirmary, consisting of two distinct rooms, one for males and one for females, should be provided for the separate accommodation of the sick.
LJSpeech-1.1/wavs/LJ003-0148.wav|and spent in providing coals, candles, plates, knives, and forks; while all the occupants of this part of the prison
LJSpeech-1.1/wavs/LJ005-0073.wav|To its efforts, and their effect upon Parliament and the public mind, we must attribute the new Jail Acts of four George the fourth
LJSpeech-1.1/wavs/LJ003-0166.wav|association at one time forbidden by custom, but which greed and rapacity long made the rule.
LJSpeech-1.1/wavs/LJ028-0076.wav|However, several decades ago, an Oriental appeared at the Berlin Museum,
LJSpeech-1.1/wavs/LJ012-0253.wav|A further discovery was made in an osier bed near Cold Harbor Lane, Camberwell,
LJSpeech-1.1/wavs/LJ024-0053.wav|Fundamentally, if in the future, America cannot trust the Congress it elects to refrain from abuse of our Constitutional usages
LJSpeech-1.1/wavs/LJ032-0069.wav|The person having access to the box then takes the notice to the window and is given the package.
LJSpeech-1.1/wavs/LJ037-0082.wav|On the evening of November twenty-two,
LJSpeech-1.1/wavs/LJ040-0085.wav|John Pic, however, did not think her position was worse than that of many other people.
LJSpeech-1.1/wavs/LJ028-0099.wav|the first-born son of Nabopolassar, King of Babylon, am I.
LJSpeech-1.1/wavs/LJ004-0170.wav|The only ventilation of this pit, this "dark, cheerless, damp, unwholesome cavern -- a dungeon in its worst sense"
LJSpeech-1.1/wavs/LJ022-0110.wav|The key men for the major responsibilities of this great task already have been selected.
LJSpeech-1.1/wavs/LJ024-0116.wav|When the time comes for action,
LJSpeech-1.1/wavs/LJ040-0161.wav|Dr. Hartogs recommended that Oswald be placed on probation on condition that he seek help and guidance through a child guidance clinic.
LJSpeech-1.1/wavs/LJ032-0266.wav|Paul M. Stombaugh, of the FBI Laboratory,
LJSpeech-1.1/wavs/LJ006-0086.wav|his place is assigned among the most depraved, the most experienced, and the most incorrigible offenders in the middle yard.
|
TensorFlow/Classification/ConvNets/utils | utils | bind_dgx_a100 | #!/bin/bash
if [[ -v SLURM_LOCALID ]]; then
echo "Bind using slurm localid"
LOCAL_ID=$SLURM_LOCALID
elif [[ -v OMPI_COMM_WORLD_LOCAL_RANK ]]; then
echo "Bind using OpenMPI env"
LOCAL_ID=$OMPI_COMM_WORLD_LOCAL_RANK
else
echo "Bind to first node"
LOCAL_ID=0
fi
case $LOCAL_ID in
0|1) exec numactl --cpunodebind=3 --membind=3 "$@";;
2|3) exec numactl --cpunodebind=1 --membind=1 "$@";;
4|5) exec numactl --cpunodebind=7 --membind=7 "$@";;
6|7) exec numactl --cpunodebind=5 --membind=5 "$@";;
*) echo "unknown binding"; exec "$@";;
esac
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/synthesizer | synthesizer | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from .base_synthesizer import BaseSynthesizer
from .configuration_graph_synthesizer import ConfigurationGraphSynthesizer
|
TensorFlow2/Detection/Efficientdet/visualize | visualize | vis_utils | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and draw the requested visualization directly on it.
They do not return a value; instead they modify the image in place.
"""
import abc
import collections
import matplotlib
matplotlib.use('Agg') # Set headless-friendly backend.
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from visualize import shape_utils
from visualize import standard_fields as fields
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, drawn from {5, 7, 11, 13, 17} that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
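# Illustrative sketch (assumption: this mirrors how callers typically consume the
# multiplier; `track_id` is a hypothetical integer identifier):
#
#   multiplier = _get_multiplier_for_color_randomness()
#   color = STANDARD_COLORS[(multiplier * track_id) % len(STANDARD_COLORS)]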
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
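# Minimal usage sketch (illustrative only; the image array and output path are
# hypothetical, and the box is given in normalized coordinates):
#
#   import numpy as np
#   image = np.zeros((480, 640, 3), dtype=np.uint8)
#   draw_bounding_box_on_image_array(
#       image, ymin=0.1, xmin=0.2, ymax=0.6, xmax=0.8,
#       color='LimeGreen', thickness=2, display_str_list=['person: 87%'])
#   save_image_array_as_png(image, '/tmp/box.png')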
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4-6]: masks (optional)
[4-6]: keypoints (optional)
[4-6]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be: image - uint8 numpy array
with shape (img_height, img_width, 3). boxes - a numpy array of shape
[N, 4]. classes - a numpy array of shape [N]. scores - a numpy array of
shape [N] or None. -- Optional positional arguments -- instance_masks -
a numpy array of shape [N, image_height, image_width]. keypoints - a
numpy array of shape [N, num_keypoints, 2]. track_ids - a numpy array of
shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates). Default is
      True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if key != input_data_fields.original_image and key != input_data_fields.image_additional_channels:
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat(
[images_with_detections, images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates, keypoint_edges,
keypoint_edge_color, keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color,
fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with values
      of either 0 or 1.
    color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then this
function assumes that the boxes to be plotted are groundtruth boxes and
plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as normalized
coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
prime_multipler = _get_multiplier_for_color_randomness()
box_to_color_map[box] = STANDARD_COLORS[(prime_multipler *
track_ids[i]) %
len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[classes[i] %
len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image, box_to_instance_masks_map[box], color=color)
if instance_boundaries is not None:
draw_mask_on_image_array(
image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
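# The sketch below is an illustrative usage example added for clarity; it is not
# part of the original module. The category index, boxes, classes and scores are
# made-up placeholder values.
def _example_visualize_detections(image):
  """Overlays two hypothetical detections on a uint8 RGB numpy image (in place)."""
  example_category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  example_boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.4, 0.4, 0.9, 0.9]], dtype=np.float32)
  example_classes = np.array([1, 2], dtype=np.int32)
  example_scores = np.array([0.92, 0.71], dtype=np.float32)
  return visualize_boxes_and_labels_on_image_array(
      image,
      example_boxes,
      example_classes,
      example_scores,
      example_category_index,
      use_normalized_coordinates=True,
      min_score_thresh=0.5,
      line_thickness=4)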
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (
np.arange(cumulative_values.size, dtype=np.float32) /
cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
# pyformat: disable
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
# pyformat: enable
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4), # pyformat: disable
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]]) # pylint: disable=assignment-from-none
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(get_images, [],
[tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
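# The sketch below is an illustrative usage example added for clarity; it is not
# part of the original module. `eval_dict` is assumed to come from
# eval_util.result_dict_for_batched_example() and `category_index` from a label map.
def _example_eval_metric_ops(eval_dict, category_index):
  """Builds image-summary eval metric ops for use in tf.estimator.EstimatorSpec."""
  vis_metric = VisualizeSingleFrameDetections(
      category_index,
      max_examples_to_draw=5,
      max_boxes_to_draw=20,
      min_score_thresh=0.2)
  # The returned dict maps summary names to (value_op, update_op) tuples and can
  # be merged into the `eval_metric_ops` argument of tf.estimator.EstimatorSpec.
  return vis_metric.get_estimator_eval_metric_ops(eval_dict)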
|
PyTorch/Recommendation/DLRM/notebooks | notebooks | DLRM_Triton_inference_demo | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # DLRM Triton Inference Demo
# ## Overview
#
# Recommendation system (RecSys) inference involves determining an ordered list of items with which the query user will most likely interact. For very large commercial databases with millions to hundreds of millions of items to choose from (such as advertisements or apps), an item retrieval procedure is usually carried out first to reduce the number of candidates to a more manageable quantity, e.g. a few hundred to a few thousand. Such methods include computationally light algorithms such as approximate nearest-neighbor search, random forests and filtering based on user preferences. From there, a deep learning based RecSys is invoked to re-rank the candidates, and those with the highest scores are presented to the users. This process is well demonstrated in the Google app store recommendation system in Figure 1.
#
# 
#
# Figure 1: Google’s app recommendation process. [Source](https://arxiv.org/pdf/1606.07792.pdf).
#
# As we can see, for each query user, the number of user-item pairs to score can be as large as a few thousand. This places an extremely heavy load on the RecSys inference server, which must handle high throughput to serve many users concurrently, yet at low latency to satisfy the stringent latency thresholds of online commerce engines.
#
# The NVIDIA Triton Inference Server [9] provides a cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or GRPC endpoint, allowing remote clients to request inferencing for any model being managed by the server. Triton automatically manages and makes use of all the available GPUs.
#
# We will next see how to prepare the DLRM model for inference with the Triton inference server and verify that Triton is up to the task.
#
# ### Learning objectives
#
# This notebook demonstrates the steps for preparing a pre-trained DLRM model for deployment and inference with the NVIDIA [Triton inference server](https://github.com/NVIDIA/triton-inference-server).
#
# ## Content
# 1. [Requirements](#1)
# 1. [Prepare model for inference](#2)
# 1. [Start the Triton inference server](#3)
# 1. [Testing server with the performance client](#4)
#
# <a id="1"></a>
# ## 1. Requirements
#
#
# ### 1.1 Docker container
# The most convenient way to make use of the NVIDIA DLRM model is via a docker container, which provides a self-contained, isolated and reproducible environment for all experiments.
#
# First, clone the repository:
#
# ```
# git clone https://github.com/NVIDIA/DeepLearningExamples
# cd DeepLearningExamples/PyTorch/Recommendation/DLRM
# ```
#
# To execute this notebook, first build the following inference container:
#
# ```
# docker build -t dlrm-inference . -f triton/Dockerfile
# ```
#
# Start in interactive docker session with:
#
# ```
# docker run -it --rm --gpus device=0 --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --net=host -v <PATH_TO_SAVED_MODEL>:/models -v <PATH_TO_EXPORT_MODEL>:/repository -v <PATH_TO_PREPROCESSED_DATA>:/data dlrm-inference bash
# ```
# where:
#
# - PATH_TO_SAVED_MODEL: directory containing the trained DLRM models with `.pt` extension.
#
# - PATH_TO_EXPORT_MODEL: directory which will contain the converted model to be used with the NVIDIA Triton inference server.
#
# - PATH_TO_PREPROCESSED_DATA: path to the preprocessed Criteo Terabyte dataset containing 3 binary data files: `test_data.bin`, `train_data.bin` and `val_data.bin`, and a JSON file `model_size.json`, totalling ~650GB.
#
# Within the docker interactive bash session, start Jupyter with
#
# ```
# export PYTHONPATH=/workspace/dlrm
# jupyter notebook --ip 0.0.0.0 --port 8888
# ```
#
# Then open the Jupyter GUI interface on your host machine at http://localhost:8888. Within the container, this demo notebook is located at `/workspace/dlrm/notebooks`.
#
# ### 1.2 Hardware
# This notebook can be executed on any CUDA-enabled NVIDIA GPU with at least 24GB of GPU memory, although for efficient mixed precision inference, a [Tensor Core NVIDIA GPU](https://www.nvidia.com/en-us/data-center/tensorcore/) is desired (Volta, Turing or newer architectures).
# In[3]:
get_ipython().system('nvidia-smi')
# <a id="2"></a>
# ## 2. Prepare model for inference
#
# We first convert the model to a format accepted by the NVIDIA Triton inference server. Triton accepts TorchScript and ONNX, among other formats.
#
# To deploy the model into a Triton-compatible format, we provide the deployer.py [script](../triton/deployer.py).
#
# ### TorchScript
# TorchScript is a way to create serializable and optimizable models from PyTorch code. Any TorchScript program can be saved from a Python process and loaded in a process where there is no Python dependency.
#
# We provide two options to convert models to TorchScript; a short illustrative sketch of the difference between them follows below:
# - `--ts-script`: convert to TorchScript using torch.jit.script
# - `--ts-trace`: convert to TorchScript using torch.jit.trace
#
#
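# Below is a minimal, generic sketch of the difference between the two modes. It is
# only an illustration (it is not taken from deployer.py); `model` and `example_batch`
# are placeholders for a DLRM instance and a sample input batch.
#
# ```python
# import torch
#
# # torch.jit.script compiles the module's Python code directly,
# # preserving data-dependent control flow (if-statements, loops).
# scripted = torch.jit.script(model)
#
# # torch.jit.trace records the operations executed for one example input,
# # so control flow is "baked in" for that particular input.
# traced = torch.jit.trace(model, example_batch)
#
# torch.jit.save(scripted, "dlrm_scripted.pt")
# ```
#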
# In the conversion below, we assume:
#
# - The trained model is stored at /models/dlrm_model_fp16.pt
#
# - The maximum batch size that Triton will handle is 65536.
#
# - The preprocessed dataset directory is /data, which contains a `model_size.json` file.
# In[12]:
get_ipython().run_cell_magic('bash', '', 'python ../triton/deployer.py \\\n--ts-script \\\n--triton-model-name dlrm-ts-script-16 \\\n--triton-max-batch-size 65536 \\\n--save-dir /repository \\\n-- --model_checkpoint /models/dlrm_model_fp16.pt \\\n--fp16 \\\n--batch_size 4096 \\\n--num_numerical_features 13 \\\n--embedding_dim 128 \\\n--top_mlp_sizes 1024 1024 512 256 1 \\\n--bottom_mlp_sizes 512 256 128 \\\n--interaction_op dot \\\n--hash_indices \\\n--dataset /data \\\n--dump_perf_data ./perfdata\n')
# ### ONNX
#
# [ONNX](https://onnx.ai/) is an open format built to represent machine learning models. ONNX defines a common set of operators - the building blocks of machine learning and deep learning models - and a common file format to enable AI developers to use models with a variety of frameworks, tools, runtimes, and compilers.
#
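# As a rough illustration of what the deployer does under the hood (a hedged sketch,
# not the exact code used by deployer.py; `model` and `example_inputs` are placeholders):
#
# ```python
# import torch
#
# torch.onnx.export(
#     model,            # the PyTorch module to convert
#     example_inputs,   # a sample input (or tuple of inputs) used for tracing
#     "model.onnx",     # output file
#     opset_version=11,
#     do_constant_folding=True)
# ```
#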
# Conversion of the pre-trained DLRM PyTorch model to an ONNX model can be done with:
# In[6]:
get_ipython().run_cell_magic('bash', '', 'python ../triton/deployer.py \\\n--onnx \\\n--triton-model-name dlrm-onnx-16 \\\n--triton-max-batch-size 4096 \\\n--save-dir /repository \\\n-- --model_checkpoint /models/dlrm_model_fp16.pt \\\n--fp16 \\\n--batch_size 4096 \\\n--num_numerical_features 13 \\\n--embedding_dim 128 \\\n--top_mlp_sizes 1024 1024 512 256 1 \\\n--bottom_mlp_sizes 512 256 128 \\\n--interaction_op dot \\\n--hash_indices \\\n--dataset /data \\\n--dump_perf_data ./perfdata\n')
# <a id="3"></a>
# ## 3. Start the Triton inference server
# *Note: this step must be done outside of the current docker container.*
#
# Open a bash window on the **host machine** and execute the following commands:
#
# ```
# docker pull nvcr.io/nvidia/tensorrtserver:20.03-py3
# docker run -d --rm --gpus device=0 --ipc=host --network=host -p 8000:8000 -p 8001:8001 -p 8002:8002 -v <PATH_TO_MODEL_REPOSITORY>:/repository nvcr.io/nvidia/tensorrtserver:20.03-py3 trtserver --model-store=/repository --log-verbose=1 --model-control-mode=explicit
# ```
#
# where:
#
# - PATH_TO_MODEL_REPOSITORY: directory on the host machine containing the converted models in section 2 above.
#
# Note that each DLRM model will require ~19GB of GPU memory.
#
# Within the `/models` directory on the inference server, the structure should look similar to the below:
#
# ```
# /models
# `-- dlrm-onnx-16
# |-- 1
# | `-- model.onnx
# | |-- bottom_mlp.0.weight
# | |-- bottom_mlp.2.weight
# | |-- bottom_mlp.4.weight
# | |-- embeddings.0.weight
# | |-- embeddings.1.weight
# | |-- embeddings.10.weight
# | |-- embeddings.11.weight
# | |-- embeddings.12.weight
# | |-- embeddings.13.weight
# | |-- embeddings.14.weight
# | |-- embeddings.15.weight
# | |-- embeddings.17.weight
# | |-- embeddings.18.weight
# | |-- embeddings.19.weight
# | |-- embeddings.2.weight
# | |-- embeddings.20.weight
# | |-- embeddings.21.weight
# | |-- embeddings.22.weight
# | |-- embeddings.23.weight
# | |-- embeddings.24.weight
# | |-- embeddings.25.weight
# | |-- embeddings.3.weight
# | |-- embeddings.4.weight
# | |-- embeddings.6.weight
# | |-- embeddings.7.weight
# | |-- embeddings.8.weight
# | |-- embeddings.9.weight
# | |-- model.onnx
# | |-- top_mlp.0.weight
# | |-- top_mlp.2.weight
# | |-- top_mlp.4.weight
# | `-- top_mlp.6.weight
# `-- config.pbtxt
# ```
# <a id="4"></a>
# ## 4. Testing server with the performance client
#
# After model deployment has completed, we can test the deployed model against the Criteo test dataset.
#
# Note: This requires mounting the Criteo test data to, e.g. `/data/test_data.bin`. Within the dataset directory, there must also be a `model_size.json` file.
# In[9]:
get_ipython().run_cell_magic('bash', '', 'python ../triton/client.py \\\n--triton-server-url localhost:8000 \\\n--protocol HTTP \\\n--triton-model-name dlrm-onnx-16 \\\n--num_numerical_features 13 \\\n--dataset_config /data/model_size.json \\\n--inference_data /data/test_data.bin \\\n--batch_size 4096 \\\n--fp16\n')
# The Triton inference server comes with a [performance client](https://docs.nvidia.com/deeplearning/sdk/triton-inference-server-master-branch-guide/docs/optimization.html#perf-client) which is designed to stress test the server using multiple client threads.
#
# The perf_client generates inference requests to your model and measures the throughput and latency of those requests. To get representative results, the perf_client measures the throughput and latency over a time window, and then repeats the measurements until it gets stable values. By default the perf_client uses average latency to determine stability but you can use the --percentile flag to stabilize results based on that confidence level. For example, if --percentile=95 is used the results will be stabilized using the 95-th percentile request latency.
#
# ### Request Concurrency
#
# By default perf_client measures your model’s latency and throughput using the lowest possible load on the model. To do this perf_client sends one inference request to the server and waits for the response. When that response is received, the perf_client immediately sends another request, and then repeats this process during the measurement windows. The number of outstanding inference requests is referred to as the request concurrency, and so by default perf_client uses a request concurrency of 1.
#
# Using the --concurrency-range <start>:<end>:<step> option you can have perf_client collect data for a range of request concurrency levels. Use the --help option to see complete documentation for this and other options.
#
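# For illustration only (assuming perf_client is on PATH; the full command actually
# used in this notebook follows in the next cell), the invocation below sweeps request
# concurrency 1 through 4 in steps of 1:
#
# ```
# perf_client -m dlrm-onnx-16 -u localhost:8001 -i gRPC -b 4096 \
#             --concurrency-range 1:4:1
# ```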
#
# In[13]:
get_ipython().run_cell_magic('bash', '', '/workspace/install/bin/perf_client \\\n--max-threads 10 \\\n-m dlrm-onnx-16 \\\n-x 1 \\\n-p 5000 \\\n-v -i gRPC \\\n-u localhost:8001 \\\n-b 4096 \\\n-l 5000 \\\n--concurrency-range 1:10 \\\n--input-data ./perfdata \\\n-f result.csv\n')
# ### Visualizing Latency vs. Throughput
#
# The perf_client provides the -f option to generate a file containing CSV output of the results.
# You can import the CSV file into a spreadsheet to help visualize the latency vs inferences/second tradeoff as well as see some components of the latency. Follow these steps:
# - Open this [spreadsheet](https://docs.google.com/spreadsheets/d/1IsdW78x_F-jLLG4lTV0L-rruk0VEBRL7Mnb-80RGLL4)
#
# - Make a copy from the File menu “Make a copy…”
#
# - Open the copy
#
# - Select the A1 cell on the “Raw Data” tab
#
# - From the File menu select “Import…”
#
# - Select “Upload” and upload the file
#
# - Select “Replace data at selected cell” and then select the “Import data” button
#
# 
#
# # Conclusion
#
# In this notebook, we have walked through the complete process of preparing the pretrained DLRM model for inference with the Triton inference server. We then stress-tested the server with the performance client to verify inference throughput.
#
# ## What's next
# Now it's time to deploy your own DLRM model with Triton.
# In[ ]:
|
PyTorch/Translation/GNMT/scripts | scripts | wmt16_en_de | #! /usr/bin/env bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
set -e
export LANG=C.UTF-8
export LC_ALL=C.UTF-8
OUTPUT_DIR=${1:-"data/wmt16_de_en"}
echo "Writing to ${OUTPUT_DIR}. To change this, set the OUTPUT_DIR environment variable."
OUTPUT_DIR_DATA="${OUTPUT_DIR}/data"
mkdir -p $OUTPUT_DIR_DATA
echo "Downloading Europarl v7. This may take a while..."
wget -nc -nv -O ${OUTPUT_DIR_DATA}/europarl-v7-de-en.tgz \
http://www.statmt.org/europarl/v7/de-en.tgz
echo "Downloading Common Crawl corpus. This may take a while..."
wget -nc -nv -O ${OUTPUT_DIR_DATA}/common-crawl.tgz \
http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz
echo "Downloading News Commentary v11. This may take a while..."
wget -nc -nv -O ${OUTPUT_DIR_DATA}/nc-v11.tgz \
http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz
echo "Downloading dev/test sets"
wget -nc -nv -O ${OUTPUT_DIR_DATA}/dev.tgz \
http://data.statmt.org/wmt16/translation-task/dev.tgz
wget -nc -nv -O ${OUTPUT_DIR_DATA}/test.tgz \
http://data.statmt.org/wmt16/translation-task/test.tgz
# Extract everything
echo "Extracting all files..."
mkdir -p "${OUTPUT_DIR_DATA}/europarl-v7-de-en"
tar -xvzf "${OUTPUT_DIR_DATA}/europarl-v7-de-en.tgz" -C "${OUTPUT_DIR_DATA}/europarl-v7-de-en"
mkdir -p "${OUTPUT_DIR_DATA}/common-crawl"
tar -xvzf "${OUTPUT_DIR_DATA}/common-crawl.tgz" -C "${OUTPUT_DIR_DATA}/common-crawl"
mkdir -p "${OUTPUT_DIR_DATA}/nc-v11"
tar -xvzf "${OUTPUT_DIR_DATA}/nc-v11.tgz" -C "${OUTPUT_DIR_DATA}/nc-v11"
mkdir -p "${OUTPUT_DIR_DATA}/dev"
tar -xvzf "${OUTPUT_DIR_DATA}/dev.tgz" -C "${OUTPUT_DIR_DATA}/dev"
mkdir -p "${OUTPUT_DIR_DATA}/test"
tar -xvzf "${OUTPUT_DIR_DATA}/test.tgz" -C "${OUTPUT_DIR_DATA}/test"
# Concatenate Training data
cat "${OUTPUT_DIR_DATA}/europarl-v7-de-en/europarl-v7.de-en.en" \
"${OUTPUT_DIR_DATA}/common-crawl/commoncrawl.de-en.en" \
"${OUTPUT_DIR_DATA}/nc-v11/training-parallel-nc-v11/news-commentary-v11.de-en.en" \
> "${OUTPUT_DIR}/train.en"
wc -l "${OUTPUT_DIR}/train.en"
cat "${OUTPUT_DIR_DATA}/europarl-v7-de-en/europarl-v7.de-en.de" \
"${OUTPUT_DIR_DATA}/common-crawl/commoncrawl.de-en.de" \
"${OUTPUT_DIR_DATA}/nc-v11/training-parallel-nc-v11/news-commentary-v11.de-en.de" \
> "${OUTPUT_DIR}/train.de"
wc -l "${OUTPUT_DIR}/train.de"
# Clone Moses
if [ ! -d "${OUTPUT_DIR}/mosesdecoder" ]; then
echo "Cloning moses for data processing"
git clone https://github.com/moses-smt/mosesdecoder.git "${OUTPUT_DIR}/mosesdecoder"
cd ${OUTPUT_DIR}/mosesdecoder
git reset --hard 8c5eaa1a122236bbf927bde4ec610906fea599e6
cd -
fi
# Convert SGM files
# Convert newstest2014 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2014-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2014.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2014-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2014.en
# Convert newstest2015 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2015-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2015.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2015-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2015.en
# Convert newstest2016 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/test/test/newstest2016-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/test/test/newstest2016.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/test/test/newstest2016-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/test/test/newstest2016.en
# Copy dev/test data to output dir
cp ${OUTPUT_DIR_DATA}/dev/dev/newstest20*.de ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/dev/dev/newstest20*.en ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/test/test/newstest20*.de ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/test/test/newstest20*.en ${OUTPUT_DIR}
# Tokenize data
for f in ${OUTPUT_DIR}/*.de; do
echo "Tokenizing $f..."
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -q -l de -threads 8 < $f > ${f%.*}.tok.de
done
for f in ${OUTPUT_DIR}/*.en; do
echo "Tokenizing $f..."
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -q -l en -threads 8 < $f > ${f%.*}.tok.en
done
# Clean all corpora
for f in ${OUTPUT_DIR}/*.en; do
fbase=${f%.*}
echo "Cleaning ${fbase}..."
${OUTPUT_DIR}/mosesdecoder/scripts/training/clean-corpus-n.perl $fbase de en "${fbase}.clean" 1 80
done
# Create dev dataset
cat "${OUTPUT_DIR}/newstest2015.tok.clean.en" \
"${OUTPUT_DIR}/newstest2016.tok.clean.en" \
> "${OUTPUT_DIR}/newstest_dev.tok.clean.en"
cat "${OUTPUT_DIR}/newstest2015.tok.clean.de" \
"${OUTPUT_DIR}/newstest2016.tok.clean.de" \
> "${OUTPUT_DIR}/newstest_dev.tok.clean.de"
# Filter datasets
python3 scripts/filter_dataset.py \
-f1 ${OUTPUT_DIR}/train.tok.clean.en \
-f2 ${OUTPUT_DIR}/train.tok.clean.de
python3 scripts/filter_dataset.py \
-f1 ${OUTPUT_DIR}/newstest_dev.tok.clean.en \
-f2 ${OUTPUT_DIR}/newstest_dev.tok.clean.de
# Generate Subword Units (BPE)
# Learn Shared BPE
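# (Illustration, not part of the pipeline output: after BPE is applied, a rare token
#  such as "Abwasserbehandlung" may be segmented into subword units joined by the
#  default "@@ " separator, e.g. "Ab@@ wasser@@ behandlung".)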
for merge_ops in 32000; do
echo "Learning BPE with merge_ops=${merge_ops}. This may take a while..."
cat "${OUTPUT_DIR}/train.tok.clean.de" "${OUTPUT_DIR}/train.tok.clean.en" | \
subword-nmt learn-bpe -s $merge_ops > "${OUTPUT_DIR}/bpe.${merge_ops}"
echo "Apply BPE with merge_ops=${merge_ops} to tokenized files..."
for lang in en de; do
for f in ${OUTPUT_DIR}/*.tok.${lang} ${OUTPUT_DIR}/*.tok.clean.${lang}; do
outfile="${f%.*}.bpe.${merge_ops}.${lang}"
subword-nmt apply-bpe -c "${OUTPUT_DIR}/bpe.${merge_ops}" < $f > "${outfile}"
echo ${outfile}
done
done
# Create vocabulary file for BPE
cat "${OUTPUT_DIR}/train.tok.clean.bpe.${merge_ops}.en" "${OUTPUT_DIR}/train.tok.clean.bpe.${merge_ops}.de" | \
subword-nmt get-vocab | cut -f1 -d ' ' > "${OUTPUT_DIR}/vocab.bpe.${merge_ops}"
done
echo "All done."
|
PyTorch/Classification/GPUNet/triton/225ms-D/runner | runner | start_NVIDIA-DGX-A100-(1x-A100-80GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.225ms-D.runner.__main__" \
--config-path "triton/225ms-D/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
--device 0 |
PyTorch/Forecasting/TFT/triton/runner | runner | start_NVIDIA-T4 | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.runner.__main__" \
--config-path "triton/runner/config_NVIDIA-T4.yaml" \
--device 0 |
PyTorch/SpeechSynthesis/FastPitch/scripts | scripts | train_benchmark | #!/usr/bin/env bash
set -a
: ${AMP:=false}
: ${NUM_GPUS_SEQUENCE:="1 4 8"}
: ${EPOCHS:=30}
: ${OUTPUT_DIR:="./output"}
: ${BATCH_SIZE:=16}
for NUM_GPUS in $NUM_GPUS_SEQUENCE ; do
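    # Keep the effective global batch size constant: with the default BATCH_SIZE=16,
    # NUM_GPUS * BATCH_SIZE * GRAD_ACCUMULATION = 256 for every GPU count in the sweep.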
GRAD_ACCUMULATION=$((256 / $BATCH_SIZE / $NUM_GPUS ))
LOG_FILE=$OUTPUT_DIR/perf-train_amp-${AMP}_${NUM_GPUS}x${BATCH_SIZE}x${GRAD_ACCUMULATION}.json
BMARK_EPOCHS=$((EPOCHS * 2 / 3 * $NUM_GPUS / 8)) # 2/3 of EPOCHS
EPOCHS=$((EPOCHS * $NUM_GPUS / 8)) bash scripts/train.sh "$@" --benchmark-epochs-num $BMARK_EPOCHS
rm -f $OUTPUT_DIR/FastPitch*.pt
done
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp | trtis_cpp | run_trtis_server | #!/bin/bash
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
IMAGE_NAME="trt-tacotron2-waveglow.trtis"
NVIDIA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-all}"
nvidia-docker run \
--rm \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-p8000:8000 \
-p8001:8001 \
-p8002:8002 \
-e "NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" \
-e "LD_LIBRARY_PATH=/opt/tensorrtserver/lib" \
"${IMAGE_NAME}" trtserver --model-store=/models
|
PyTorch/Detection/Efficientdet/data | data | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dataset import CocoDetection
from .transforms import *
from .loader import create_loader
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | transformer_encoder_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import transformer_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class TransformerEncoderTest(keras_parameterized.TestCase):
def test_network_creation(self):
hidden_size = 32
sequence_length = 21
# Create a small TransformerEncoder for testing.
test_network = transformer_encoder.TransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
sequence_length=sequence_length,
num_attention_heads=2,
num_layers=3)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# The default output dtype is float32.
self.assertAllEqual(tf.float32, data.dtype)
self.assertAllEqual(tf.float32, pooled.dtype)
def test_network_creation_with_float16_dtype(self):
hidden_size = 32
sequence_length = 21
tf.keras.mixed_precision.experimental.set_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = transformer_encoder.TransformerEncoder(
vocab_size=100,
hidden_size=hidden_size,
sequence_length=sequence_length,
num_attention_heads=2,
num_layers=3,
float_dtype="float16")
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the output should always be float16.
self.assertAllEqual(tf.float16, data.dtype)
self.assertAllEqual(tf.float16, pooled.dtype)
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
tf.keras.mixed_precision.experimental.set_policy("float32")
# Create a small TransformerEncoder for testing.
test_network = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
sequence_length=sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
self.assertTrue(
test_network._position_embedding_layer._use_dynamic_slicing)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
_ = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
hidden_size=hidden_size,
sequence_length=sequence_length,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=3,
type_vocab_size=num_types)
self.assertTrue(test_network._position_embedding_layer._use_dynamic_slicing)
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
def test_serialize_deserialize(self):
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
sequence_length=21,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform",
float_dtype="float16")
network = transformer_encoder.TransformerEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = transformer_encoder.TransformerEncoder.from_config(
network.get_config())
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
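# A minimal end-to-end usage sketch, mirroring test_network_invocation above
# (all values are illustrative only):
#
#   encoder = transformer_encoder.TransformerEncoder(
#       vocab_size=100, hidden_size=32, sequence_length=21,
#       num_attention_heads=2, num_layers=3)
#   word_ids = tf.keras.Input(shape=(21,), dtype=tf.int32)
#   mask = tf.keras.Input(shape=(21,), dtype=tf.int32)
#   type_ids = tf.keras.Input(shape=(21,), dtype=tf.int32)
#   data, pooled = encoder([word_ids, mask, type_ids])
#   model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
#   data_out, pooled_out = model.predict([
#       np.random.randint(100, size=(3, 21)),
#       np.ones((3, 21), dtype=np.int32),
#       np.zeros((3, 21), dtype=np.int32)])
#   # data_out has shape (3, 21, 32); pooled_out has shape (3, 32)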
if __name__ == "__main__":
assert tf.version.VERSION.startswith('2.')
tf.test.main()
|
PyTorch/LanguageModeling/Transformer-XL/pytorch | pytorch | eval | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import math
import os
import pickle
import sys
import time
import warnings
import dllogger
import numpy as np
import torch
import yaml
import data_utils
import utils
from data_utils import get_lm_corpus
from data_utils import tokenize_raw
from utils.exp_utils import AverageMeter
from utils.exp_utils import benchmark
from utils.exp_utils import create_exp_dir
from utils.exp_utils import l2_promote
from utils.exp_utils import log_env_info
def parse_args():
parent_parser = argparse.ArgumentParser(
description='PyTorch Transformer-XL Language Model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
)
parser = argparse.ArgumentParser(parents=[parent_parser], add_help=True)
cfg_parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
cfg_parser.add_argument('--config', default='default')
cfg_parser.add_argument('--config_file', default=None)
config_args, _ = cfg_parser.parse_known_args()
if config_args.config is not None and config_args.config_file is not None:
with open(config_args.config_file) as f:
config = yaml.load(f, Loader=yaml.FullLoader)[config_args.config]['eval']
else:
config = {}
parser.add_argument('--work_dir', default='LM-TFM', type=str,
help='experiment directory')
parser.add_argument('--debug', action='store_true',
help='run in debug mode (do not create exp dir)')
parser.add_argument('--data', type=str, default='../data/wikitext-103',
help='location of the data corpus')
parser.add_argument('--manual', type=str, default=None, nargs='+',
help='run model on raw input data')
parser.add_argument('--dataset', type=str, default='wt103',
choices=['wt103', 'lm1b', 'enwik8', 'text8'],
help='dataset name')
parser.add_argument('--split', type=str, default='all',
choices=['all', 'valid', 'test'],
help='which split to evaluate')
parser.add_argument('--affinity', type=str,
default='single_unique',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
parser.add_argument('--type', type=str, default='pytorch',
choices=['pytorch', 'torchscript'],
help='type of runtime to use')
parser.add_argument('--batch_size', type=int, default=16,
help='batch size')
parser.add_argument('--tgt_len', type=int, default=64,
help='number of tokens to predict')
parser.add_argument('--ext_len', type=int, default=0,
help='length of the extended context')
parser.add_argument('--mem_len', type=int, default=640,
help='length of the retained previous heads')
parser.add_argument('--seed', type=int, default=1111,
help='Random seed')
parser.add_argument('--clamp_len', type=int, default=-1,
help='max positional embedding index')
parser.add_argument('--cuda', action='store_true',
help='Run evaluation on a GPU using CUDA')
parser.add_argument('--model', type=str, default='',
help='path to the checkpoint')
parser.add_argument('--manual_config', type=json.loads, default=None,
help='Manually specify config for the model')
parser.add_argument('--manual_vocab', type=str, default='word',
choices=['word', 'bpe'],
help='Manually specify type of vocabulary')
parser.add_argument('--fp16', action='store_true',
help='Run training in fp16/mixed precision')
parser.add_argument('--log_all_ranks', action='store_true',
help='Enable logging for all distributed ranks')
parser.add_argument('--dllog_file', type=str, default='eval_log.json',
help='Name of the DLLogger output file')
parser.add_argument('--same_length', action='store_true',
help='set same length attention with masking')
parser.add_argument('--no_env', action='store_true',
help='Do not print info on execution env')
parser.add_argument('--log_interval', type=int, default=10,
help='Report interval')
parser.add_argument('--target_perplexity', type=float, default=None,
help='target perplexity')
parser.add_argument('--target_throughput', type=float, default=None,
help='target throughput')
parser.add_argument('--save_data', action='store_true',
help='save latency and throughput data to a file')
parser.add_argument('--repeat', type=int, default=1,
help='loop over the dataset REPEAT times')
parser.add_argument('--max_size', type=int, default=None,
help='run inference on up to MAX_SIZE batches')
parser.add_argument('--percentiles', nargs='+', default=[90, 95, 99],
help='percentiles for latency confidence intervals')
parser.add_argument('--save_torchscript', default=None, type=str,
help='save torchscript model to a file')
parser.add_argument('--load_torchscript', default=None, type=str,
help='load torchscript model from a file')
parser.add_argument('--local_rank', type=int,
default=os.getenv('LOCAL_RANK', 0),
help='Used for multi-process training.')
parser.set_defaults(**config)
args, _ = parser.parse_known_args()
if args.manual:
args.batch_size = 1
if args.same_length and args.tgt_len > args.mem_len:
warnings.warn('--same_length is intended to be used with large '
'mem_len relative to tgt_len')
if args.ext_len < 0:
raise RuntimeError('Extended context length must be non-negative')
return args
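# Example invocations (file and checkpoint names below are illustrative, not shipped with this script):
#   python eval.py --cuda --model checkpoint_best.pt --split test
#   python eval.py --cuda --config_file wt103_base.yaml --config default --split valid
# Values from the selected YAML section become argparse defaults via parser.set_defaults(**config);
# flags passed explicitly on the command line still take precedence.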
def load_checkpoint(path):
dst = f'cuda:{torch.cuda.current_device()}'
logging.info(f'Loading checkpoint from {path}')
checkpoint = torch.load(path, map_location=dst)
return checkpoint
def format_log(loss, split, args):
if args.dataset in ['enwik8', 'text8']:
log_str = '| {0} loss {1:5.2f} | {0} bpc {2:9.5f} '.format(
split, loss, loss / math.log(2))
else:
log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(
split, loss, math.exp(loss))
return log_str
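# Worked example of the two formats above, for loss = 3.0 nats:
#   word-level datasets (e.g. wt103) report ppl = exp(3.0)      ~ 20.09
#   character datasets (enwik8/text8) report bpc = 3.0 / ln(2)  ~ 4.33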
def evaluate(
eval_iter, model, device, meters, log_interval, max_size=None, repeat=1
):
total_len, total_loss = 0, 0.
eval_step = 0
log_throughput = 0
log_latency = 0
log_loss = 0
utils.distributed.barrier()
start_time = time.time()
with torch.no_grad():
mems = None
for _ in range(repeat):
for idx, (data, target, seq_len, warm) in enumerate(eval_iter):
if max_size and idx >= max_size:
break
eval_step += 1
utils.distributed.barrier()
start_iter = time.time()
loss, mems = model(data, target, mems)
utils.distributed.barrier()
elapsed = time.time() - start_iter
loss = loss.float().mean()
log_loss += loss.item()
if warm:
total_loss += seq_len * loss.item()
total_len += seq_len
meters['eval_latency'].update(elapsed)
log_latency += elapsed
target_tokens = target.numel()
throughput = target_tokens / elapsed
throughput = utils.distributed.all_reduce_item(throughput, op='sum')
meters['eval_throughput'].update(throughput, elapsed)
log_throughput += throughput
if eval_step % log_interval == 0:
log_throughput /= log_interval
log_latency /= log_interval
log_loss /= log_interval
log_ppl = math.exp(log_loss)
log_str = '| step {:>8d} | batches {:>6d} / {:d} ' \
'| ms/batch {:5.2f} | tok/s {:7.0f} | loss {:5.2f} | ppl {:5.2f}'.format(
eval_step,
idx+1,
eval_iter.n_batch,
log_latency * 1000,
log_throughput,
log_loss,
log_ppl,
)
logging.info(log_str)
dllogger_data = {
'eval_latency': log_latency * 1000,
'eval_throughput': log_throughput,
'eval_loss': log_loss,
'eval_perplexity': log_ppl,
}
dllogger.log(step=tuple([eval_step]), data=dllogger_data)
log_throughput = 0
log_latency = 0
log_loss = 0
utils.distributed.barrier()
total_time = time.time() - start_time
logging.info('Time : {:.2f}s, {:.2f}ms/segment'.format(
total_time, 1000 * total_time / (idx+1)))
avg_loss = total_loss / total_len
avg_loss = utils.distributed.all_reduce_item(avg_loss, op='mean')
return avg_loss
def compile_model(model, device, args):
inp = torch.randint(0, 1000, (args.tgt_len, args.batch_size)).to(device)
tgt = torch.randint(0, 1000, (args.tgt_len, args.batch_size)).to(device)
utils.distributed.barrier()
start = time.time()
with torch.no_grad():
mems = None
for _ in range(2):
_, mems = model(inp, tgt, mems)
utils.distributed.barrier()
stop = time.time()
logging.info(f'Building the model took {stop - start:.2f} seconds')
def main():
args = parse_args()
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
affinity = utils.gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
if args.type == 'pytorch':
from mem_transformer import MemTransformerLM
else:
from inference.mem_transformer_jit import MemTransformerLM
torch.cuda.set_device(args.local_rank)
l2_promote()
device = torch.device('cuda' if args.cuda else 'cpu')
utils.distributed.init_distributed(args.cuda)
with utils.distributed.sync_workers() as rank:
if rank == 0:
create_exp_dir(args.work_dir, debug=args.debug)
# Setup logging
if args.log_all_ranks:
log_file = f'eval_log_rank_{utils.distributed.get_rank()}.log'
else:
log_file = 'eval_log.log'
dllog_file = args.dllog_file
log_file = os.path.join(args.work_dir, log_file)
dllog_file = os.path.join(args.work_dir, dllog_file)
if args.debug:
log_file = os.devnull
dllog_file = os.devnull
utils.exp_utils.setup_logging(log_all_ranks=args.log_all_ranks,
filename=log_file,
filemode='a',
)
utils.exp_utils.setup_dllogger(enabled=True, filename=dllog_file)
logging.info(args)
dllogger.log(step='PARAMETER', data=vars(args))
dllogger.metadata('eval_throughput', {'unit': 'tokens/s'})
dllogger.metadata('eval_loss', {'unit': None})
dllogger.metadata('eval_perplexity', {'unit': None})
dllogger.metadata('eval_latency', {'unit': 'ms'})
dllogger.metadata('eval_avg_latency', {'unit': 'ms'})
for p in args.percentiles:
dllogger.metadata(f'eval_{p}%_latency', {'unit': 'ms'})
if not args.no_env:
log_env_info()
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.model:
model_path = args.model
elif args.work_dir:
model_path = os.path.join(args.work_dir, 'checkpoint_best.pt')
else:
raise RuntimeError('Specify path to checkpoint using --model or --work_dir')
if not args.manual_config:
checkpoint = load_checkpoint(model_path)
vocab_type = checkpoint['args'].vocab
else:
checkpoint = None
vocab_type = args.manual_vocab
if args.manual:
vocab = checkpoint['vocab']
if hasattr(vocab, 'sym2idx') and not hasattr(vocab, 'unk_idx'):
vocab.unk_idx = vocab.sym2idx['<unk>']
text = " ".join(args.manual)
tokenized = tokenize_raw(text)
symbols = vocab.tokenize(tokenized, add_eos=True)
tensor = vocab.convert_to_tensor(symbols)
iter = data_utils.LMOrderedIterator(tensor, bsz=args.batch_size,
bptt=args.tgt_len, device=device,
ext_len=args.ext_len, warmup=False)
else:
# Load dataset
corpus = get_lm_corpus(args.data, args.dataset, vocab_type)
if args.split == 'valid' or args.split == 'test':
iter = corpus.get_iterator(args.split, args.batch_size, args.tgt_len,
device=device, mem_len=args.mem_len,
ext_len=args.ext_len)
else:
raise RuntimeError('Unknown split')
if args.fp16:
dtype = torch.float16
math_str = 'fp16'
else:
dtype = torch.float32
math_str = 'fp32'
if args.load_torchscript:
model = torch.jit.load(args.load_torchscript)
elif not args.manual_config:
checkpoint['model_config']['tgt_len'] = args.tgt_len
checkpoint['model_config']['ext_len'] = args.ext_len
checkpoint['model_config']['mem_len'] = args.mem_len
checkpoint['model_config']['clamp_len'] = args.clamp_len
checkpoint['model_config']['same_length'] = args.same_length
checkpoint['model_config']['dtype'] = dtype
model = MemTransformerLM(**checkpoint['model_config'])
if args.type == 'pytorch':
model.load_state_dict(checkpoint['model_state'])
elif args.type == 'torchscript':
model.load_state_dict(checkpoint['model_state'], strict=False)
elif args.manual_config:
args.manual_config['tgt_len'] = args.tgt_len
args.manual_config['ext_len'] = args.ext_len
args.manual_config['mem_len'] = args.mem_len
args.manual_config['clamp_len'] = args.clamp_len
args.manual_config['same_length'] = args.same_length
args.manual_config['dtype'] = dtype
model = MemTransformerLM(**args.manual_config)
model = model.eval()
model = model.to(device)
model = model.to(dtype)
if args.type == 'torchscript' and not args.manual_config:
state = checkpoint['model_state']
tie_projs = checkpoint['model_config']['tie_projs']
tie_weight = checkpoint['model_config']['tie_weight']
div_val = checkpoint['model_config']['div_val']
d_model = checkpoint['model_config']['d_model']
d_embed = checkpoint['model_config']['d_embed']
if div_val != 1 or d_model != d_embed:
for i in range(len(model.word_emb.emb_projs)):
model.word_emb.emb_projs[i] = state[f'word_emb.emb_projs.{i}'].to(dtype)
for i in range(len(model.crit.out_projs)):
if div_val == 1:
src = 0
else:
src = i
if model.crit.out_projs[i] is not None:
if tie_projs[i]:
model.crit.out_projs[i] = state[f'word_emb.emb_projs.{src}'].to(dtype)
else:
model.crit.out_projs[i] = state[f'crit.out_projs.{i}'].to(dtype)
for i in range(len(model.crit.out_layers_biases)):
model.crit.out_layers_biases[i] = state[f'crit.out_layers_biases.{i}'].to(dtype)
if tie_weight:
for i in range(len(model.crit.out_layers_weights)):
model.crit.out_layers_weights[i] = state[f'word_emb.emb_layers.{i}.weight'].to(dtype)
else:
for i in range(len(model.crit.out_layers_weights)):
model.crit.out_layers_weights[i] = state[f'crit.out_layers_weights.{i}'].to(dtype)
model = torch.jit.script(model)
if args.type != 'pytorch':
compile_model(model, device, args)
if args.type == 'torchscript' and args.save_torchscript:
torch.jit.save(model, args.save_torchscript)
logging.info(f'Evaluating with: math {math_str} type {args.type} '
f'bsz {args.batch_size} tgt_len {args.tgt_len} '
f'ext_len {args.ext_len} mem_len {args.mem_len} '
f'clamp_len {args.clamp_len}')
meters = {}
warmup = args.mem_len // args.tgt_len + 2
meters['eval_throughput'] = AverageMeter(warmup=warmup, keep=args.save_data)
meters['eval_latency'] = AverageMeter(warmup=warmup, keep=args.save_data)
loss = evaluate(iter, model, device, meters, args.log_interval, args.max_size, args.repeat)
perplexity = math.exp(loss)
log_str = format_log(loss, args.split, args)
summary = {
'eval_loss': loss,
'eval_ppl': perplexity,
}
logging.info('=' * 100)
logging.info(log_str)
logging.info('=' * 100)
if args.save_data:
latency_data = np.array(meters['eval_latency'].vals)
throughput_data = np.array(meters['eval_throughput'].vals)
precision = 'fp16' if args.fp16 else 'fp32'
data_fname = f'eval_data_{args.batch_size}_{precision}_{args.type}'
data_path = os.path.join(args.work_dir, data_fname)
data = {
'args': args,
'throughput': throughput_data,
'latency': latency_data,
}
with open(data_path, 'wb') as f:
pickle.dump(data, f)
avg_throughput = meters['eval_throughput'].avg
logging.info(f'Throughput Avg: {avg_throughput:.2f} tok/s')
logging.info(f'Latency Avg: {1000.0 * latency_data.mean():.2f} ms')
for p in args.percentiles:
logging.info(f'Latency {p}%: {1000.0 * np.percentile(latency_data, p):.2f} ms')
logging.info('=' * 100)
summary.update({
'eval_throughput': avg_throughput,
'eval_avg_latency': 1000 * latency_data.mean(),
})
for p in args.percentiles:
summary[f'eval_{p}%_latency'] = 1000 * np.percentile(latency_data, p)
dllogger.log(step=tuple(), data=summary)
passed = benchmark(target_perplexity=args.target_perplexity,
test_perplexity=perplexity,
target_throughput=args.target_throughput,
test_throughput=meters['eval_throughput'].avg,
)
if not passed:
sys.exit(1)
if __name__ == "__main__":
# Disable profiling executor
try:
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
except AttributeError:
pass
main()
|
TensorFlow/LanguageModeling/BERT/data | data | Downloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader
from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader
from WikiDownloader import WikiDownloader
from BooksDownloader import BooksDownloader
from GLUEDownloader import GLUEDownloader
from SquadDownloader import SquadDownloader
from PubMedDownloader import PubMedDownloader
class Downloader:
def __init__(self, dataset_name, save_path):
self.dataset_name = dataset_name
self.save_path = save_path
def download(self):
if self.dataset_name == 'bookscorpus':
self.download_bookscorpus()
elif self.dataset_name == 'wikicorpus_en':
self.download_wikicorpus('en')
elif self.dataset_name == 'wikicorpus_zh':
self.download_wikicorpus('zh')
elif self.dataset_name == 'pubmed_baseline':
self.download_pubmed('baseline')
elif self.dataset_name == 'pubmed_daily_update':
self.download_pubmed('daily_update')
elif self.dataset_name == 'pubmed_fulltext':
self.download_pubmed('fulltext')
elif self.dataset_name == 'pubmed_open_access':
self.download_pubmed('open_access')
elif self.dataset_name == 'google_pretrained_weights':
self.download_google_pretrained_weights()
elif self.dataset_name == 'nvidia_pretrained_weights':
self.download_nvidia_pretrained_weights()
elif self.dataset_name == 'mrpc':
self.download_glue(self.dataset_name)
elif self.dataset_name == 'mnli':
self.download_glue(self.dataset_name)
elif self.dataset_name == 'cola':
self.download_glue(self.dataset_name)
elif self.dataset_name == 'sst-2':
self.download_glue(self.dataset_name)
elif self.dataset_name == 'squad':
self.download_squad()
elif self.dataset_name == 'all':
self.download_bookscorpus()
self.download_wikicorpus('en')
self.download_wikicorpus('zh')
self.download_pubmed('baseline')
self.download_pubmed('daily_update')
self.download_pubmed('fulltext')
self.download_pubmed('open_access')
self.download_google_pretrained_weights()
self.download_nvidia_pretrained_weights()
self.download_glue("cola")
self.download_glue("mnli")
self.download_glue("mrpc")
self.download_glue("sst-2")
self.download_squad()
else:
print(self.dataset_name)
assert False, 'Unknown dataset_name provided to downloader'
def download_bookscorpus(self):
downloader = BooksDownloader(self.save_path)
downloader.download()
def download_wikicorpus(self, language):
downloader = WikiDownloader(language, self.save_path)
downloader.download()
def download_pubmed(self, subset):
downloader = PubMedDownloader(subset, self.save_path)
downloader.download()
def download_google_pretrained_weights(self):
downloader = GooglePretrainedWeightDownloader(self.save_path)
downloader.download()
def download_nvidia_pretrained_weights(self):
downloader = NVIDIAPretrainedWeightDownloader(self.save_path)
downloader.download()
def download_glue(self, glue_task_name):
downloader = GLUEDownloader(self.save_path)
downloader.download(glue_task_name)
def download_squad(self):
downloader = SquadDownloader(self.save_path)
downloader.download()
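# Minimal usage sketch (the save path below is a placeholder):
#   downloader = Downloader('squad', '/workspace/bert/data')
#   downloader.download()
# Passing 'all' downloads every dataset, pretrained-weight set, and GLUE task listed above.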
|
PyTorch/Detection/Efficientdet/utils | utils | utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import OrderedDict
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import torch
from torch import distributed as dist
import logging
import logging.handlers
from .model_ema import ModelEma
_logger = logging.getLogger(__name__)
def reduce_tensor(tensor, n):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n
return rt
def unwrap_model(model):
if isinstance(model, ModelEma):
return unwrap_model(model.ema)
else:
return model.module if hasattr(model, 'module') else model
def get_state_dict(model, unwrap_fn=unwrap_model):
return unwrap_fn(model).state_dict()
def setup_dllogger(rank, enabled=True, filename='log.json'):
if enabled and rank == 0:
backends = [
StdOutBackend(Verbosity.DEFAULT),
JSONStreamBackend(
Verbosity.VERBOSE,
filename,
),
]
DLLogger.init(backends)
else:
DLLogger.init([])
def get_latest_file(files):
prefix = files[0].split("checkpoint")[0]
max_checkpoint_number = max([int(f.split("checkpoint_")[1].split('.')[0]) for f in files]) # checkpoint_name_convention = checkpoint_ + number + .pth.tar
return prefix + "checkpoint_" + str(max_checkpoint_number) + ".pth.tar"
def get_latest_checkpoint(dir_path):
if not os.path.exists(dir_path):
print("{} does not exist to load checkpoint".format(dir_path))
return None
files = [os.path.join(dir_path, f) for f in sorted(os.listdir(dir_path)) if "checkpoint" in f]
print("... Looking inside {}".format(dir_path))
if len(files) > 0:
return get_latest_file(files)
return None
class FormatterNoInfo(logging.Formatter):
def __init__(self, fmt='%(levelname)s: %(message)s'):
logging.Formatter.__init__(self, fmt)
def format(self, record):
if record.levelno == logging.INFO:
return str(record.getMessage())
return logging.Formatter.format(self, record)
def setup_default_logging(default_level=logging.INFO, log_path=''):
console_handler = logging.StreamHandler()
console_handler.setFormatter(FormatterNoInfo())
logging.root.addHandler(console_handler)
logging.root.setLevel(default_level)
if log_path:
file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s")
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
class CheckpointSaver:
def __init__(
self,
args=None,
checkpoint_dir='',
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.args = args
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
# config
self.checkpoint_dir = checkpoint_dir
self.extension = '.pth.tar'
self.unwrap_fn = unwrap_fn
def save_checkpoint(self, model, optimizer, epoch, scaler=None, model_ema=None, metric=None, is_best=False):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, "tmp" + self.extension)
actual_save_path = os.path.join(self.checkpoint_dir, "checkpoint_" + str(epoch) + self.extension)
self._save(model, optimizer, tmp_save_path, actual_save_path, epoch, scaler, model_ema, metric, is_best)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, model, optimizer, tmp_save_path, save_path, epoch, scaler=None, model_ema=None, metric=None, is_best=False):
save_state = {
'epoch': epoch,
'arch': type(model).__name__.lower(),
'state_dict': get_state_dict(model, self.unwrap_fn),
'optimizer': optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if scaler is not None:
save_state['scaler'] = scaler.state_dict()
if model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, tmp_save_path)
os.rename(tmp_save_path, save_path)
if is_best:
shutil.copyfile(
save_path, os.path.join(self.checkpoint_dir, "model_best" + self.extension)
)
self.best_epoch = epoch
self.best_metric = metric
print("Checkpoint saving for {} epoch is done...".format(epoch))
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
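# Usage sketch: track a running loss average over batches of different sizes.
#   loss_meter = AverageMeter()
#   loss_meter.update(0.52, n=32)   # batch of 32 with mean loss 0.52
#   loss_meter.update(0.48, n=16)   # batch of 16 with mean loss 0.48
#   loss_meter.avg                  # -> (0.52*32 + 0.48*16) / 48 ~ 0.507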
def freeze_layers_fn(model, freeze_layers=[]):
for name, param in model.named_parameters():
if any(layer in name for layer in freeze_layers):
param.requires_grad = False
def load_state_dict(checkpoint_path, has_module, use_ema=False, remove_params=[]):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
ckpt = checkpoint[state_dict_key]
_logger.info('Restoring model state from checkpoint...')
else:
ckpt = checkpoint
_logger.info('Restoring model state from state_dict ...')
new_state_dict = OrderedDict()
for k, v in ckpt.items():
if any(remove_str in k for remove_str in remove_params):
continue
# strip `module.` prefix
if not has_module and k.startswith('module'):
name = k[7:]
elif k.startswith('model'):
name = k[6:]
elif has_module and not k.startswith('module'):
name = 'module.' + k
else:
name = k
new_state_dict[name] = v
state_dict = new_state_dict
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict, checkpoint
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True, remove_params=[]):
has_module = hasattr(model, 'module')
if has_module:
_logger.info('model has attribute module...')
else:
_logger.info('model does not have attribute module...')
state_dict, checkpoint = load_state_dict(checkpoint_path, has_module, use_ema, remove_params)
if len(remove_params) > 0:
this_dict = model.state_dict()
this_dict.update(state_dict)
model.load_state_dict(this_dict, strict=strict)
else:
model.load_state_dict(state_dict, strict=strict)
return checkpoint
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True, remove_params=[]):
resume_epoch = None
checkpoint = load_checkpoint(model, checkpoint_path=checkpoint_path, use_ema=False, strict=False, remove_params=remove_params)
resume_epoch = 0
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch'] + 1
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
return checkpoint, resume_epoch |
PyTorch/Classification/GPUNet/triton/065ms/runner | runner | __main__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
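# Typical invocation (the module and config paths are examples; adjust to the target GPU):
#   python3 -m "triton.065ms.runner.__main__" \
#       --config-path "triton/065ms/runner/config_NVIDIA-T4.yaml" \
#       --devices 0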
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices) |
PyTorch/Detection/SSD/examples | examples | SSD300_FP16_4GPU | # This script launches SSD300 training in FP16 on 4 GPUs using 256 batch size (64 per GPU)
# Usage ./SSD300_FP16_4GPU.sh <path to this repository> <path to dataset> <additional flags>
torchrun --nproc_per_node=4 $1/main.py --backbone resnet50 --warmup 300 --bs 64 --data $2 ${@:3}
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner | triton_inference_runner | runner | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from ..utils import TritonClientProtocol, parse_server_url
from .grpc import AsyncInferenceRunner as AsyncGRPCRunner
from .grpc import SyncInferenceRunner as SyncGRPCRunner
from .http import AsyncInferenceRunner as AsyncHTTPRunner
from .http import SyncInferenceRunner as SyncHTTPRunner
class TritonInferenceRunner:
async_runners = {
TritonClientProtocol.GRPC: AsyncGRPCRunner,
TritonClientProtocol.HTTP: AsyncHTTPRunner,
}
sync_runners = {
TritonClientProtocol.GRPC: SyncGRPCRunner,
TritonClientProtocol.HTTP: SyncHTTPRunner,
}
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
dataloader_fn,
verbose: bool = False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: int = 128,
synchronous: bool = False,
):
protocol, host, port = parse_server_url(server_url)
server_url = f"{host}:{port}"
if synchronous:
sync_runner_cls = TritonInferenceRunner.sync_runners[protocol]
self._runner = sync_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
)
else:
async_runner_cls = TritonInferenceRunner.async_runners[protocol]
self._runner = async_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
max_unresponded_requests=max_unresponded_requests,
)
def __iter__(self):
return self._runner.__iter__()
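# Usage sketch (server URL, model name, and dataloader are placeholders):
#   runner = TritonInferenceRunner(
#       server_url="grpc://localhost:8001",
#       model_name="widedeep",
#       model_version="1",
#       dataloader_fn=my_dataloader_fn,   # zero-argument callable returning a dataloader
#       synchronous=True,
#   )
#   for result in runner:
#       ...  # results are produced by the underlying sync/async GRPC or HTTP runner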
|
PyTorch/LanguageModeling/BART/utils | utils | generation_utils | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.nn import functional as F
from utils.file_utils import ModelOutput
from utils.generation_beam_search import BeamScorer, BeamSearchScorer
from utils.generation_logits_process import (
EncoderNoRepeatNGramLogitsProcessor,
HammingDiversityLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
logger = logging.getLogger(__name__)
@dataclass
class GreedySearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
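# Quick reference for the fields documented above: `sequences` is always populated with
# shape (batch_size, sequence_length); `scores`, `attentions`, and `hidden_states` stay
# None unless the corresponding output_* flags are enabled, in which case they are
# per-generation-step tuples with the shapes listed in the docstring.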
@dataclass
class GreedySearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the
encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the encoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states
attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the encoder) of shape
:obj:`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, num_heads, generated_length,
sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length,
hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states
attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the encoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, num_heads,
generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length,
hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam sample.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequence)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_beams, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and the sum of log softmax of previously generated tokens in this
beam. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams, config.vocab_size)`.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the encoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_beams, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput]
SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput]
BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput]
BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput]
class GenerationMixin:
"""
A class containing all of the functions supporting generation, to be used as a mixin in
:class:`~transformers.PreTrainedModel`.
"""
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
"""
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to prepare inputs in the
generate method.
"""
return {"input_ids": input_ids}
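# A minimal sketch (not part of this class) of how a decoder-only subclass might override the hook above:
# when a cached `past` is available, only the most recently generated token needs to be fed to the model.
# The `past` keyword and the exact dict keys are illustrative assumptions, not guaranteed by this base class.
#
#     def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
#         if past is not None:
#             input_ids = input_ids[:, -1:]  # keep only the newly generated token when a cache is present
#         return {"input_ids": input_ids, "past": past, "use_cache": kwargs.get("use_cache")}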
def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
"""
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in
the generate method.
"""
return logits
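# Hedged example: some encoder-decoder subclasses use this hook to force a specific token at the final
# generation step. The `final_token_id` attribute below is a hypothetical name used only to illustrate the
# signature; it does not correspond to any attribute defined in this file.
#
#     def adjust_logits_during_generation(self, logits, cur_len, max_length, **kwargs):
#         if cur_len == max_length - 1:
#             forced = torch.full_like(logits, float("-inf"))
#             forced[:, self.final_token_id] = 0.0  # after softmax, all probability mass goes to the forced token
#             return forced
#         return logits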
def _prepare_input_ids_for_generation(self, bos_token_id: int) -> torch.LongTensor:
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id
def _prepare_attention_mask_for_generation(
self, input_ids: torch.Tensor, pad_token_id: int, eos_token_id: int
) -> torch.LongTensor:
is_pad_token_in_inputs_ids = (pad_token_id is not None) and (pad_token_id in input_ids)
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)
if is_pad_token_in_inputs_ids and is_pad_token_not_equal_to_eos_token_id:
return input_ids.ne(pad_token_id).long()
return input_ids.new_ones(input_ids.shape)
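# Illustration of the logic above (a hedged sketch, not extra behavior): if `pad_token_id` appears in the
# prompt and differs from `eos_token_id`, padded positions are masked out; otherwise a mask of ones is used.
#
#     input_ids = torch.tensor([[5, 7, 0, 0]])  # assume pad_token_id == 0 and eos_token_id == 2
#     # -> attention_mask == tensor([[1, 1, 0, 0]])
#     input_ids = torch.tensor([[5, 7, 2, 2]])  # assume pad_token_id is None
#     # -> attention_mask == tensor([[1, 1, 1, 1]])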
def _prepare_encoder_decoder_kwargs_for_generation(
self, input_ids: torch.LongTensor, model_kwargs
) -> Dict[str, Any]:
# retrieve encoder hidden states
encoder = self.get_encoder()
encoder_kwargs = {
argument: value for argument, value in model_kwargs.items() if not argument.startswith("decoder_")
}
model_kwargs["encoder_outputs"]: ModelOutput = encoder(input_ids, return_dict=True, **encoder_kwargs)
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self, input_ids: torch.LongTensor, decoder_start_token_id: int = None, bos_token_id: int = None
) -> torch.LongTensor:
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
decoder_input_ids = (
torch.ones((input_ids.shape[0], 1), dtype=input_ids.dtype, device=input_ids.device)
* decoder_start_token_id
)
return decoder_input_ids
def _get_pad_token_id(self, pad_token_id: int = None, eos_token_id: int = None) -> int:
if pad_token_id is None and eos_token_id is not None:
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
pad_token_id = eos_token_id
return pad_token_id
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "decoder_start_token_id")
and self.config.decoder.decoder_start_token_id is not None
):
return self.config.decoder.decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "bos_token_id")
and self.config.decoder.bos_token_id is not None
):
return self.config.decoder.bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_inputs_for_generation(
input_ids: torch.LongTensor,
expand_size: int = 1,
is_encoder_decoder: bool = False,
attention_mask: torch.LongTensor = None,
encoder_outputs: ModelOutput = None,
**model_kwargs,
) -> Tuple[torch.LongTensor, Dict[str, Any]]:
expanded_return_idx = (
torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
)
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
if is_encoder_decoder:
assert encoder_outputs is not None
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select(
0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
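# Worked example (hedged): with `expand_size=3`, each row of `input_ids` is repeated three times in place,
# which is how `generate` interleaves inputs for beam search and multiple return sequences.
#
#     input_ids = torch.tensor([[10, 11], [20, 21]])
#     # _expand_inputs_for_generation(input_ids, expand_size=3)[0] ->
#     # tensor([[10, 11], [10, 11], [10, 11], [20, 21], [20, 21], [20, 21]])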
@staticmethod
def _init_sequence_length_for_generation(
input_ids: torch.LongTensor, max_length: int
) -> Tuple[torch.Tensor, torch.Tensor, int]:
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
sequence_lengths = input_ids.new(input_ids.shape[0]).fill_(max_length)
cur_len = input_ids.shape[-1]
return sequence_lengths, unfinished_sequences, cur_len
@staticmethod
def _update_seq_length_for_generation(
sequence_lengths: torch.LongTensor,
unfinished_sequences: torch.LongTensor,
cur_len: int,
is_eos_in_next_token: torch.BoolTensor,
) -> Tuple[torch.LongTensor, torch.LongTensor]:
# check if sentence is not finished yet
is_sent_unfinished = unfinished_sequences.mul(is_eos_in_next_token.long()).bool()
# update sentence length
sequence_lengths = sequence_lengths.masked_fill(is_sent_unfinished, cur_len)
unfinished_sequences = unfinished_sequences.mul((~is_eos_in_next_token).long())
return sequence_lengths, unfinished_sequences
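# Hedged illustration of the bookkeeping above: a sequence that emits EOS at the current step has its
# length frozen at `cur_len` and is marked finished so its later tokens can be replaced by padding.
#
#     unfinished_sequences = torch.tensor([1, 1, 0])
#     is_eos_in_next_token = torch.tensor([True, False, True])
#     # -> only the first sequence is newly finished; its `sequence_lengths` entry becomes cur_len
#     #    and `unfinished_sequences` becomes tensor([0, 1, 0])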
@staticmethod
def _update_model_kwargs_for_generation(
outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
) -> Dict[str, Any]:
# update past
if "past_key_values" in outputs:
model_kwargs["past"] = outputs.past_key_values
elif "mems" in outputs:
model_kwargs["past"] = outputs.mems
elif "past_buckets_states" in outputs:
model_kwargs["past"] = outputs.past_buckets_states
else:
model_kwargs["past"] = None
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
# update attention mask
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
return model_kwargs
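# Hedged note with a tiny example: besides carrying the cache forward under the `past` key, for
# decoder-only models the attention mask grows by one column of ones per generated token.
#
#     attention_mask = torch.tensor([[1, 1, 0]])
#     # after one update step: tensor([[1, 1, 0, 1]])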
def _reorder_cache(self, past, beam_idx):
raise NotImplementedError(
f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to enable beam search for {self.__class__}"
)
def _get_logits_warper(
self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None
) -> LogitsProcessorList:
"""
This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
:obj:`~transformers.LogitsWarper` instances used for multinomial sampling.
"""
# init warp parameters
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
temperature = temperature if temperature is not None else self.config.temperature
# instantiate warpers list
warpers = LogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if temperature is not None and temperature != 1.0:
warpers.append(TemperatureLogitsWarper(temperature))
if top_k is not None and top_k != 0:
warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1)))
if top_p is not None and top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1)))
return warpers
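# Usage sketch (hedged; the concrete values are illustrative, not recommendations):
#
#     warpers = self._get_logits_warper(top_k=50, top_p=0.95, temperature=0.7, num_beams=1)
#     # inside the sampling loop the warpers are applied on top of the processors:
#     #     next_token_scores = warpers(input_ids, next_token_scores)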
def _get_logits_processor(
self,
repetition_penalty: float,
no_repeat_ngram_size: int,
encoder_no_repeat_ngram_size: int,
encoder_input_ids: torch.LongTensor,
bad_words_ids: List[List[int]],
min_length: int,
eos_token_id: int,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]],
num_beams: int,
num_beam_groups: int,
diversity_penalty: float,
) -> LogitsProcessorList:
"""
This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
:obj:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
"""
# init warp parameters
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
encoder_no_repeat_ngram_size = (
encoder_no_repeat_ngram_size
if encoder_no_repeat_ngram_size is not None
else self.config.encoder_no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
min_length = min_length if min_length is not None else self.config.min_length
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty
# instantiate processors list
processors = LogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if diversity_penalty is not None and diversity_penalty > 0.0:
processors.append(
HammingDiversityLogitsProcessor(
diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups
)
)
if repetition_penalty is not None and repetition_penalty != 1.0:
processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))
if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0:
processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size))
if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0:
if self.config.is_encoder_decoder:
processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids))
else:
raise ValueError(
"It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture"
)
if bad_words_ids is not None:
processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id))
if min_length is not None and eos_token_id is not None and min_length > -1:
processors.append(MinLengthLogitsProcessor(min_length, eos_token_id))
if prefix_allowed_tokens_fn is not None:
processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams))
return processors
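# Usage sketch (hedged; the concrete values are illustrative, not recommendations):
#
#     processors = self._get_logits_processor(
#         repetition_penalty=1.2, no_repeat_ngram_size=3, encoder_no_repeat_ngram_size=0,
#         encoder_input_ids=None, bad_words_ids=None, min_length=10, eos_token_id=2,
#         prefix_allowed_tokens_fn=None, num_beams=1, num_beam_groups=1, diversity_penalty=0.0,
#     )
#     # inside the decoding loops: next_token_scores = processors(input_ids, next_token_logits)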
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
encoder_no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
num_beam_groups: Optional[int] = None,
diversity_penalty: Optional[float] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
multinomial sampling, beam-search decoding, and beam-search multinomial sampling.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those attributes in the model's config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as a
:obj:`torch.LongTensor` of shape :obj:`(1, 1)` filled with :obj:`bos_token_id`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to modulate the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the
model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer
sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
encoder_no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the
``decoder_input_ids``.
bad_words_ids (:obj:`List[List[int]]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer(bad_word,
add_prefix_space=True).input_ids`.
num_return_sequences (:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same
shape as :obj:`input_ids` that masks the pad token. `What are attention masks?
<../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
num_beam_groups (:obj:`int`, `optional`, defaults to 1):
Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of
beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
diversity_penalty (:obj:`float`, `optional`, defaults to 0.0):
This value is subtracted from a beam's score if it generates the same token as any beam from another group
at a given generation step. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is
enabled.
prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`, `optional`):
If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments :obj:`inputs_ids` and the batch ID
:obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step
conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`. This
argument is useful for constrained generation conditioned on the prefix, as described in
`Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If the
model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific
kwargs should be prefixed with `decoder_`.
Return:
:class:`~transformers.file_utils.ModelOutput` or :obj:`torch.LongTensor`: A
:class:`~transformers.file_utils.ModelOutput` (if ``return_dict_in_generate=True`` or when
``config.return_dict_in_generate=True``) or a :obj:`torch.LongTensor`.
If the model is `not` an encoder-decoder model (``model.config.is_encoder_decoder=False``), the
possible :class:`~transformers.file_utils.ModelOutput` types are:
- :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.SampleDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput`
If the model is an encoder-decoder model (``model.config.is_encoder_decoder=True``), the possible
:class:`~transformers.file_utils.ModelOutput` types are:
- :class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.SampleEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput`
Examples::
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
>>> # do greedy decoding without providing a prompt
>>> outputs = model.generate(max_length=40)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> document = (
... "at least two people were killed in a suspected bomb attack on a passenger bus "
... "in the strife-torn southern philippines on monday , the military said."
... )
>>> # encode input context
>>> input_ids = tokenizer(document, return_tensors="pt").input_ids
>>> # generate 3 independent sequences using beam search decoding (5 beams)
>>> # with T5 encoder-decoder model conditioned on short news article.
>>> outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
>>> input_context = "The dog"
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> # generate 3 candidates using sampling
>>> outputs = model.generate(input_ids=input_ids, max_length=20, num_return_sequences=3, do_sample=True)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("ctrl")
>>> model = AutoModelForCausalLM.from_pretrained("ctrl")
>>> # "Legal" is one of the control codes for ctrl
>>> input_context = "Legal My neighbor is"
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> outputs = model.generate(input_ids=input_ids, max_length=20, repetition_penalty=1.2)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> input_context = "My cute dog"
>>> # get tokens of words that should not be generated
>>> bad_words_ids = [tokenizer(bad_word, add_prefix_space=True).input_ids for bad_word in ["idiot", "stupid", "shut up"]]
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> # generate sequences without allowing bad_words to be generated
>>> outputs = model.generate(input_ids=input_ids, max_length=20, do_sample=True, bad_words_ids=bad_words_ids)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
"""
# set init values
num_beams = num_beams if num_beams is not None else self.config.num_beams
num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups
max_length = max_length if max_length is not None else self.config.max_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
model_kwargs["output_attentions"] = output_attentions
model_kwargs["output_hidden_states"] = output_hidden_states
if input_ids is None:
# init `input_ids` with bos_token_id
input_ids = self._prepare_input_ids_for_generation(bos_token_id)
if model_kwargs.get("attention_mask", None) is None:
# init `attention_mask` depending on `pad_token_id`
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
input_ids, pad_token_id, eos_token_id
)
# special case if pad_token_id is not defined
if pad_token_id is None and eos_token_id is not None:
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
pad_token_id = eos_token_id
# Storing encoder_input_ids for logits_processor that could use them
encoder_input_ids = input_ids if self.config.is_encoder_decoder else None
if self.config.is_encoder_decoder:
# add encoder_outputs to model_kwargs
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs)
# set input_ids as decoder_input_ids
if "decoder_input_ids" in model_kwargs:
input_ids = model_kwargs.pop("decoder_input_ids")
else:
input_ids = self._prepare_decoder_input_ids_for_generation(
input_ids, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id
)
if "encoder_outputs" not in model_kwargs or not isinstance(model_kwargs["encoder_outputs"], ModelOutput):
raise ValueError("Make sure that `model_kwargs` include `encoder_outputs` of type `ModelOutput`.")
if input_ids.shape[-1] >= max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}."
"This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``."
)
# determine generation mode
is_greedy_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is False
is_sample_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is True
is_beam_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is False
is_beam_sample_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is True
is_group_beam_gen_mode = (num_beams > 1) and (num_beam_groups > 1)
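# Hedged summary of the dispatch below (derived from the flags above, for orientation only):
#     num_beams == 1, do_sample=False -> greedy_search
#     num_beams == 1, do_sample=True  -> sample
#     num_beams > 1,  do_sample=False -> beam_search (or group_beam_search when num_beam_groups > 1)
#     num_beams > 1,  do_sample=True  -> beam_sample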
if num_beam_groups > num_beams:
raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`")
if is_group_beam_gen_mode and do_sample is True:
raise ValueError(
"Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`."
)
# set model_kwargs
model_kwargs["use_cache"] = use_cache
# get distribution pre_processing samplers
logits_processor = self._get_logits_processor(
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
encoder_input_ids=encoder_input_ids,
bad_words_ids=bad_words_ids,
min_length=min_length,
eos_token_id=eos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
num_beams=num_beams,
num_beam_groups=num_beam_groups,
diversity_penalty=diversity_penalty,
)
if is_greedy_gen_mode:
if num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search."
)
# greedy search
return self.greedy_search(
input_ids,
logits_processor=logits_processor,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
elif is_sample_gen_mode:
# get probability distribution warper
logits_warper = self._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams
)
# expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
elif is_beam_gen_mode:
batch_size = input_ids.shape[0]
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
if num_return_sequences > num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
)
# interleave with `num_beams`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
elif is_beam_sample_gen_mode:
logits_warper = self._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams
)
batch_size = input_ids.shape[0] * num_return_sequences
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
)
# interleave with `num_beams * num_return_sequences`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_beams * num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
return self.beam_sample(
input_ids,
beam_scorer,
logits_processor=logits_processor,
logits_warper=logits_warper,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
elif is_group_beam_gen_mode:
batch_size = input_ids.shape[0]
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
if num_return_sequences > num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if num_beams % num_beam_groups != 0:
raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.")
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
max_length=max_length,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
# interleave with `num_beams`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
return self.group_beam_search(
input_ids,
diverse_beam_scorer,
logits_processor=logits_processor,
max_length=max_length,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**model_kwargs,
)
def greedy_search(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using greedy decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`torch.LongTensor` of shape :obj:`(1,)`.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the :obj:`forward` function of the
model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# init sequence length tensors
sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation(
input_ids, max_length
)
while cur_len < max_length:
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = outputs.logits[:, -1, :]
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_logits,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# pre-process distribution
next_tokens_scores = logits_processor(input_ids, next_token_logits)
# argmax
next_tokens = torch.argmax(next_tokens_scores, dim=-1)
# replace the next tokens of finished sequences with the padding token
if eos_token_id is not None:
assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined."
next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences)
# add token and increase length by one
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
# update sequence length
if eos_token_id is not None:
sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation(
sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id
)
# update model kwargs
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sequences.max() == 0:
break
# increase cur_len
cur_len = cur_len + 1
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GreedySearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return GreedySearchDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def sample(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[SampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using multinomial sampling.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`torch.LongTensor` of shape :obj:`(1,)`.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
logits_warper (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language
modeling head applied before multinomial sampling at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.SampleDecoderOnlyOutput`,
:class:`~transformers.generation_utils.SampleEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.SampleDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.SampleEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id),
... ])
>>> # instantiate logits processors
>>> logits_warper = LogitsProcessorList([
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ])
>>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# init sequence length tensors
sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation(
input_ids, max_length
)
# auto-regressive generation
while cur_len < max_length:
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = outputs.logits[:, -1, :]
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# sample
probs = F.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
# replace the next tokens of finished sequences with the padding token
if eos_token_id is not None:
assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined."
next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences)
# add token and increase length by one
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
cur_len = cur_len + 1
# update sequence length
if eos_token_id is not None:
sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation(
sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id
)
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sequences.max() == 0:
break
# update model kwargs
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return SampleEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return SampleDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using beam search decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`torch.LongTensor` of shape :obj:`(1,)`.
beam_scorer (:obj:`BeamScorer`):
A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
assert (
num_beams * batch_size == batch_beam_size
), "Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = outputs.logits[:, -1, :]
# adjust token logits during generation (a no-op by default; e.g. needed for Bart)
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
next_token_scores = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx)
if beam_scorer.is_done:
break
sequence_outputs = beam_scorer.finalize(
input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def beam_sample(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[BeamSampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using beam search with multinomial sampling.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`torch.LongTensor` of shape :obj:`(1,)`.
beam_scorer (:obj:`BeamScorer`):
A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
logits_warper (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language
modeling head applied before multinomial sampling at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)
... ])
>>> # instantiate logits processors
>>> logits_warper = LogitsProcessorList([
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ])
>>> outputs = model.beam_sample(
... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs
... )
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores = beam_scores.view((batch_size * num_beams,))
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = outputs.logits[:, -1, :]
# adjust token scores (a no-op by default)
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
next_token_scores = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
probs = F.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams)
next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, _indices)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx)
if beam_scorer.is_done:
break
sequence_outputs = beam_scorer.finalize(
input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def group_beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
):
r"""
Generates sequences for models with a language modeling head using diverse (group) beam search decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`torch.LongTensor` of shape :obj:`(1,)`.
beam_scorer (:obj:`BeamScorer`):
A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
model_kwargs:
Additional model specific kwargs that will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... HammingDiversityLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # let's run diverse beam search using 6 beams
>>> num_beams = 6
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... num_beam_groups=3
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3),
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.group_beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
num_beam_groups = beam_scorer.num_beam_groups
num_sub_beams = num_beams // num_beam_groups
device = input_ids.device
batch_beam_size, cur_len = input_ids.shape
assert (
num_beams * batch_size == batch_beam_size
), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
# initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
# the same group don't produce the same tokens every time.
beam_scores[:, ::num_sub_beams] = 0
beam_scores = beam_scores.view((batch_size * num_beams,))
while cur_len < max_length:
# predicted tokens in cur_len step
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
# indices which will form the beams in the next time step
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
# do one decoder step on all beams of all sentences in batch
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
for beam_group_idx in range(num_beam_groups):
group_start_idx = beam_group_idx * num_sub_beams
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
group_size = group_end_idx - group_start_idx
# indices of beams of current group among all sentences in batch
batch_group_indices = []
if output_scores:
processed_score = torch.zeros_like(outputs.logits[:, -1, :])
for batch_idx in range(batch_size):
batch_group_indices.extend(
[batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
)
group_input_ids = input_ids[batch_group_indices]
# select outputs of beams of current group only
next_token_logits = outputs.logits[batch_group_indices, -1, :]
# adjust token logits during generation (a no-op by default; e.g. needed for Bart)
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * group_size, vocab_size)
vocab_size = next_token_scores.shape[-1]
next_token_scores = logits_processor(
group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx
)
next_token_scores = next_token_scores + beam_scores[batch_group_indices].unsqueeze(-1).expand_as(
next_token_scores
)
if output_scores:
processed_score[batch_group_indices] = next_token_scores
# reshape for beam search
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
group_input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids[batch_group_indices] = group_input_ids[beam_idx]
group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
current_tokens[batch_group_indices] = group_input_ids[:, -1]
# (beam_idx // group_size) -> batch_idx
# (beam_idx % group_size) -> offset of idx inside the group
reordering_indices[batch_group_indices] = (
num_beams * (beam_idx // group_size) + group_start_idx + (beam_idx % group_size)
)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (processed_score,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices)
input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
if beam_scorer.is_done:
break
sequence_outputs = beam_scorer.finalize(
input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"]
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def top_k_top_p_filtering(
logits: torch.FloatTensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
if 0 <= top_p <= 1.0:
logits = TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=min_tokens_to_keep)(None, logits)
return logits |
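# --- Editor's usage sketch (assumption: not part of the original module) ------
# Minimal illustration of combining top-k/top-p filtering with multinomial
# sampling. The batch size, vocabulary size and random logits are placeholder
# values chosen only for this example; `torch` and `F` are already imported at
# the top of this module.
def _example_top_k_top_p_sampling():
    logits = torch.randn(2, 50)  # (batch_size, vocab_size), dummy values
    filtered_logits = top_k_top_p_filtering(logits, top_k=10, top_p=0.9)
    probs = F.softmax(filtered_logits, dim=-1)  # filtered entries get probability 0
    next_tokens = torch.multinomial(probs, num_samples=1)  # one sampled token id per row
    return next_tokens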
TensorFlow/Segmentation/UNet_Industrial/model/layers | layers | normalization | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import inspect
import tensorflow as tf
from model.layers import _log_hparams
__all__ = ['batch_norm']
def batch_norm(
inputs,
decay=0.999,
epsilon=0.001,
scale=False,
center=True,
is_training=True,
data_format='NHWC',
param_initializers=None
):
"""Adds a Batch Normalization layer."""
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
if param_initializers is not None:
for key, initializer in param_initializers.items():
if key not in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
raise ValueError("Unknown key received: `%s`" % key)
if inspect.isclass(initializer):
initializer = initializer()
param_initializers[key] = initializer
if initializer.__class__.__module__ != 'tensorflow.python.ops.init_ops':
raise ValueError("The object `%s` is not a Tensor initializer" % str(initializer))
input_shape = inputs.get_shape()
input_rank = input_shape.ndims
input_channels = input_shape[1]
if input_rank == 2:
if data_format == 'NCHW':
new_shape = [-1, input_channels, 1, 1]
else:
new_shape = [-1, 1, 1, input_channels]
inputs = tf.reshape(inputs, new_shape)
net = tf.contrib.layers.batch_norm(
inputs,
decay=decay,
scale=scale,
epsilon=epsilon,
is_training=is_training,
trainable=is_training,
fused=True,
data_format=data_format,
center=center,
param_initializers=param_initializers
)
if input_rank == 2:
net = tf.reshape(net, [-1, input_channels])
_log_hparams(
classname='BatchNorm',
layername=net.name,
data_format=data_format,
is_training=is_training,
decay=decay,
epsilon=epsilon,
scale=scale,
center=center,
fused=True,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net |
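# --- Editor's usage sketch (assumption: not part of the original module) ------
# Minimal illustration of wiring `batch_norm` after a convolution in a
# TensorFlow 1.x graph (tf.contrib is required by this layer). The input shape
# and filter count are placeholder values for illustration only.
def _example_batch_norm():
    images = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name="images")  # NHWC
    conv = tf.layers.conv2d(images, filters=16, kernel_size=3, padding="same")
    return batch_norm(
        conv,
        decay=0.999,
        epsilon=0.001,
        scale=True,
        center=True,
        is_training=True,
        data_format='NHWC',
    )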
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit | deployment_toolkit | extensions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
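# --- Editor's usage sketch (assumption: not part of the original module) ------
# Illustrates the registration pattern that `scan_for_extensions` looks for:
# a module placed under the toolkit calls `register_extension` at import time,
# and the registered class can later be looked up by its key. `DummyRunner` and
# the "dummy" key are hypothetical names used only for this example.
def _example_register_extension():
    class DummyRunner:  # hypothetical runner implementation
        pass

    runners.register_extension("dummy", DummyRunner)
    assert runners.get("dummy") is DummyRunner
    assert "dummy" in runners.supported_extensions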
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | executor | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import BackendAccelerator, Precision
from .core import Batching, Measurement, Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import (
clean_directory,
exec_command,
format_env_key,
format_env_value,
get_result_path,
measurement_env_params,
offline_performance_configuration,
online_performance_configuration,
)
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}" # noqa: B950
)
results = {}
environment = self._prepare_environment(task, experiment)
LOGGER.info("Experiment details")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters.get("backend_accelerator")
or experiment.parameters.get("accelerator"),
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}" # noqa: B950
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}" # noqa: B950
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
yield ExperimentResult(
status=Status(state=ExperimentStatus.FAILED, message=message),
experiment=experiment,
results=results,
)
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}" # noqa: B950
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, experiment: Experiment) -> Dict:
"""
Prepare environment data and export it
Args:
experiment: Experiment data
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"ENSEMBLE_MODEL_NAME": task.ensemble_model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
"PERFORMANCE_TOOL": task.performance_tool.value,
"MODEL_BATCHING": task.batching,
}
measurement_params = self._measurement_params(
max_batch_size=experiment.parameters["max_batch_size"],
number_of_model_instances=experiment.parameters["number_of_model_instances"],
batching=task.batching,
steps_online=task.measurement_steps_online,
steps_offline=task.measurement_steps_offline,
)
environment = {
**environment,
**measurement_params,
}
if experiment.checkpoint:
environment["CHECKPOINT_DIR"] = task.checkpoints[experiment.checkpoint].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in experiment.parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = str(value)
return environment
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == BackendAccelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}"
) # noqa: B950
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}" # noqa: B950
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}" # noqa: B950
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
Returns:
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
def _measurement_params(
self,
max_batch_size: int,
number_of_model_instances: int,
steps_offline: int,
steps_online: int,
batching: str,
):
max_batch_size = int(max_batch_size)
if batching == Batching.DISABLED.value:
LOGGER.debug("Model does not support batching.")
measurement = Measurement(
offline_batch_sizes=[1],
offline_concurrency=[1],
online_batch_sizes=[1],
online_concurrency=[1],
min_shapes_batch=max_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement
offline_batch_sizes, offline_concurrency = offline_performance_configuration(
steps=steps_offline,
max_batch_size=max_batch_size,
)
if batching == Batching.DYNAMIC.value:
online_batch_sizes, online_concurrency = online_performance_configuration(
steps=steps_online,
max_batch_size=max_batch_size,
number_of_model_instances=number_of_model_instances,
)
else:
online_batch_sizes, online_concurrency = offline_batch_sizes, offline_concurrency
min_batch_size = min(min(offline_batch_sizes), min(online_batch_sizes))
measurement = Measurement(
offline_batch_sizes=offline_batch_sizes,
offline_concurrency=offline_concurrency,
online_batch_sizes=online_batch_sizes,
online_concurrency=online_concurrency,
min_shapes_batch=min_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement_env_params(measurement)
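# --- Editor's usage sketch (assumption: not part of the original module) ------
# Shows how the Executor is typically driven: `start()` is a generator, so
# results are consumed as each experiment finishes. The `task`, `maintainer`
# and `pipeline` arguments are assumed to be already-constructed objects of the
# imported types; building them is outside the scope of this sketch.
def _example_run_experiments(workspace: pathlib.Path, maintainer: Maintainer, pipeline: Pipeline, task: Task):
    executor = Executor(
        workspace=workspace,
        maintainer=maintainer,
        pipeline=pipeline,
        devices=["0"],
    )
    try:
        for result in executor.start(task):
            if result.status.state == ExperimentStatus.SUCCEED:
                LOGGER.info(f"Experiment succeeded with results: {result.results}")
            else:
                LOGGER.error(f"Experiment failed: {result.status.message}")
    finally:
        executor.stop()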
|
TensorFlow2/LanguageModeling/BERT/official/modeling | modeling | model_training_utils_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for official.modeling.training.model_training_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.modeling import model_training_utils
def eager_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode='eager',
)
def eager_gpu_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode='eager',
)
def create_fake_data_input_fn(batch_size, features_shape, num_classes):
"""Creates a dummy input function with the given feature and label shapes.
Args:
batch_size: integer.
features_shape: list[int]. Feature shape for an individual example.
num_classes: integer. Number of labels.
Returns:
An input function that is usable in the executor.
"""
def _dataset_fn(input_context=None):
"""An input function for generating fake data."""
local_batch_size = input_context.get_per_replica_batch_size(batch_size)
features = np.random.rand(64, *features_shape)
labels = np.random.randint(2, size=[64, num_classes])
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
def _assign_dtype(features, labels):
features = tf.cast(features, tf.float32)
labels = tf.cast(labels, tf.float32)
return features, labels
# Shuffle, repeat, and batch the examples.
dataset = dataset.map(_assign_dtype)
dataset = dataset.shuffle(64).repeat()
dataset = dataset.batch(local_batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=64)
return dataset
return _dataset_fn
def create_model_fn(input_shape, num_classes, use_float16=False):
def _model_fn():
"""A one-layer softmax model suitable for testing."""
input_layer = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.Dense(num_classes, activation='relu')(input_layer)
output_layer = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
sub_model = tf.keras.models.Model(input_layer, x, name='sub_model')
model = tf.keras.models.Model(input_layer, output_layer, name='model')
model.add_metric(
tf.reduce_mean(input_layer), name='mean_input', aggregation='mean')
model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
if use_float16:
model.optimizer = (
tf.keras.mixed_precision.experimental.LossScaleOptimizer(
model.optimizer, loss_scale='dynamic'))
return model, sub_model
return _model_fn
def metric_fn():
"""Gets a tf.keras metric object."""
return tf.keras.metrics.CategoricalAccuracy(name='accuracy', dtype=tf.float32)
def summaries_with_matching_keyword(keyword, summary_dir):
"""Yields summary protos matching given keyword from event file."""
event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*'))
for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):
if event.summary is not None:
for value in event.summary.value:
if keyword in value.tag:
tf.compat.v1.logging.error(event)
yield event.summary
def check_eventfile_for_keyword(keyword, summary_dir):
"""Checks event files for the keyword."""
return any(summaries_with_matching_keyword(keyword, summary_dir))
class ModelTrainingUtilsTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(ModelTrainingUtilsTest, self).setUp()
self._model_fn = create_model_fn(input_shape=[128], num_classes=3)
def run_training(self, strategy, model_dir, steps_per_loop, run_eagerly):
input_fn = create_fake_data_input_fn(
batch_size=8, features_shape=[128], num_classes=3)
model_training_utils.run_customized_training_loop(
strategy=strategy,
model_fn=self._model_fn,
loss_fn=tf.keras.losses.categorical_crossentropy,
model_dir=model_dir,
steps_per_epoch=20,
steps_per_loop=steps_per_loop,
epochs=2,
train_input_fn=input_fn,
eval_input_fn=input_fn,
eval_steps=10,
init_checkpoint=None,
metric_fn=metric_fn,
custom_callbacks=None,
run_eagerly=run_eagerly)
@combinations.generate(eager_strategy_combinations())
def test_train_eager_single_step(self, distribution):
model_dir = self.get_temp_dir()
if isinstance(distribution, tf.distribute.experimental.TPUStrategy):
with self.assertRaises(ValueError):
self.run_training(
distribution, model_dir, steps_per_loop=1, run_eagerly=True)
else:
self.run_training(
distribution, model_dir, steps_per_loop=1, run_eagerly=True)
@combinations.generate(eager_gpu_strategy_combinations())
def test_train_eager_mixed_precision(self, distribution):
model_dir = self.get_temp_dir()
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
tf.keras.mixed_precision.experimental.set_policy(policy)
self._model_fn = create_model_fn(
input_shape=[128], num_classes=3, use_float16=True)
self.run_training(
distribution, model_dir, steps_per_loop=1, run_eagerly=True)
@combinations.generate(eager_strategy_combinations())
def test_train_check_artifacts(self, distribution):
model_dir = self.get_temp_dir()
self.run_training(
distribution, model_dir, steps_per_loop=10, run_eagerly=False)
# Two checkpoints should be saved after two epochs.
self.assertNotEmpty(tf.io.gfile.glob(os.path.join(model_dir, 'ctl_step_*')))
self.assertNotEmpty(
tf.io.gfile.glob(
os.path.join(model_dir, 'summaries/training_summary*')))
# Loss and accuracy values should be written into summaries.
self.assertTrue(
check_eventfile_for_keyword('loss',
os.path.join(model_dir, 'summaries/train')))
self.assertTrue(
check_eventfile_for_keyword('accuracy',
os.path.join(model_dir, 'summaries/train')))
self.assertTrue(
check_eventfile_for_keyword('mean_input',
os.path.join(model_dir, 'summaries/train')))
self.assertTrue(
check_eventfile_for_keyword('accuracy',
os.path.join(model_dir, 'summaries/eval')))
self.assertTrue(
check_eventfile_for_keyword('mean_input',
os.path.join(model_dir, 'summaries/eval')))
if __name__ == '__main__':
assert tf.version.VERSION.startswith('2.')
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/ci | ci | e2e_mask_rcnn_R_50_FPN_1x | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "/data3/joc_checkpoints/pytorch/maskrcnn/R-50.pkl"
BACKBONE:
CONV_BODY: "R-50-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TRAIN: 12000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
ROI_MASK_HEAD:
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
MASK_ON: True
DATASETS:
TRAIN: ("coco_2017_train",)
TEST: ("coco_2017_val",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
BASE_LR: 0.12
WEIGHT_DECAY: 0.0001
STEPS: (12000, 16000)
MAX_ITER: 16667
IMS_PER_BATCH: 96
WARMUP_FACTOR: 0.0001
WARMUP_ITERS: 800
WARMUP_METHOD: "mlperf_linear"
TEST:
IMS_PER_BATCH: 8
PATHS_CATALOG: "maskrcnn_benchmark/config/paths_catalog_ci.py"
OUTPUT_DIR: "."
|
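# --- Editor's usage sketch (assumption: not part of the original config) ------
# A config like the one above is typically merged into maskrcnn_benchmark's
# default yacs config before training. The file path below is a hypothetical
# placeholder; point it at wherever this YAML lives in your checkout.
from maskrcnn_benchmark.config import cfg

def load_ci_config(config_file="configs/ci/e2e_mask_rcnn_R_50_FPN_1x.yaml"):
    cfg.merge_from_file(config_file)                   # apply the overrides above
    cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 16])  # optional command-line style override
    cfg.freeze()
    return cfg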
PaddlePaddle/Classification/RN50v1.5/utils | utils | __init__ | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2AttentionPlugin | taco2AttentionPlugin | taco2AttentionLayerPluginCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_ATTENTIONLAYERPLUGINCREATOR_H
#define TT2I_ATTENTIONLAYERPLUGINCREATOR_H
#include "NvInfer.h"
#include <string>
#ifdef DEVEL
// The destructor of nvinfer1::IPluginCreator is non-virtual and public, so
// we need to suppress the warning.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#endif
namespace nvinfer1
{
namespace plugin
{
class Taco2AttentionLayerPluginCreator : public nvinfer1::IPluginCreator
{
public:
/**
* @brief Get the collection of fields for this plugin, with their names only.
*
* @return The collection of fields.
*/
static nvinfer1::PluginFieldCollection* getFields();
/**
* @brief Create a new Taco2AttentionLayerPluginCreator.
*/
Taco2AttentionLayerPluginCreator();
/**
* @brief Get the name of the plugin.
*
* @return The name of the plugin.
*/
const char* getPluginName() const override;
/**
* @brief Get the plugin version.
*
* @return The plugin version.
*/
const char* getPluginVersion() const override;
/**
* @brief Get the collection of fields for this plugin.
*
* @return The collection of fields.
*/
const nvinfer1::PluginFieldCollection* getFieldNames() override;
/**
* @brief Create a new Taco2AttentionLayerPlugin.
*
* @param name The name (unused currently).
* @param fc The collection of fields to initialize with.
*
* @return The created plugin.
*/
nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override;
/**
* @brief Create a custom layer by name from a data stream.
*
 * @param name The name of the layer.
* @param serialData The serialized data for the layer.
* @param serialLength The length of the serialized data.
*
* @return The plugin. Clients must destroy the plugin once all consumers of
* it have been destroyed.
*/
nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override;
/**
* @brief Set the namespace for created plugins.
*
* @param pluginNamespace The namespace.
*/
void setPluginNamespace(const char* pluginNamespace) override;
/**
* @brief Get the namespace for created plugins.
*
* @return The namespace.
*/
const char* getPluginNamespace() const override;
private:
std::string mNamespace;
};
} // namespace plugin
} // namespace nvinfer1
#ifdef DEVEL
#pragma GCC diagnostic pop
#endif
#endif
|
TensorFlow/Detection/SSD/models/research/object_detection/meta_architectures | meta_architectures | ssd_meta_arch_test_lib | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for SSD models meta architecture tests."""
import functools
import tensorflow as tf
from object_detection.core import anchor_generator
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import target_assigner
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import model_pb2
from object_detection.utils import ops
from object_detection.utils import test_case
from object_detection.utils import test_utils
slim = tf.contrib.slim
keras = tf.keras.layers
class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Fake ssd feature extracture for ssd meta arch tests."""
def __init__(self):
super(FakeSSDFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams_fn=None)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(
inputs=preprocessed_inputs,
num_outputs=32,
kernel_size=1,
scope='layer1')
return [features]
class FakeSSDKerasFeatureExtractor(ssd_meta_arch.SSDKerasFeatureExtractor):
"""Fake keras based ssd feature extracture for ssd meta arch tests."""
def __init__(self):
with tf.name_scope('mock_model'):
super(FakeSSDKerasFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
conv_hyperparams=None,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
)
self._conv = keras.Conv2D(filters=32, kernel_size=1, name='layer1')
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def _extract_features(self, preprocessed_inputs, **kwargs):
with tf.name_scope('mock_model'):
return [self._conv(preprocessed_inputs)]
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""A simple 2x2 anchor grid on the unit square used for test only."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [
box_list.BoxList(
tf.constant(
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
],
tf.float32))
]
def num_anchors(self):
return 4
class SSDMetaArchTestBase(test_case.TestCase):
"""Base class to test SSD based meta architectures."""
def _create_model(
self,
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
use_keras=False,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5):
is_training = False
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
if use_keras:
mock_box_predictor = test_utils.MockKerasBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
else:
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, num_classes, add_background_class=add_background_class)
mock_box_coder = test_utils.MockBoxCoder()
if use_keras:
fake_feature_extractor = FakeSSDKerasFeatureExtractor()
else:
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=nms_max_size_per_class,
max_total_size=nms_max_size_per_class,
use_static_shapes=use_static_shapes)
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=1.0)
random_example_sampler = None
if random_example_sampling:
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=0.5)
target_assigner_instance = target_assigner.TargetAssigner(
region_similarity_calculator,
mock_matcher,
mock_box_coder,
negative_class_weight=negative_class_weight)
model_config = model_pb2.DetectionModel()
if expected_loss_weights == model_config.ssd.loss.NONE:
expected_loss_weights_fn = None
else:
raise ValueError('Not a valid value for expected_loss_weights.')
code_size = 4
kwargs = {}
if predict_mask:
kwargs.update({
'mask_prediction_fn': test_utils.MockMaskHead(num_classes=1).predict,
})
model = model_fn(
is_training=is_training,
anchor_generator=mock_anchor_generator,
box_predictor=mock_box_predictor,
box_coder=mock_box_coder,
feature_extractor=fake_feature_extractor,
encode_background_as_zeros=encode_background_as_zeros,
image_resizer_fn=image_resizer_fn,
non_max_suppression_fn=non_max_suppression_fn,
score_conversion_fn=tf.identity,
classification_loss=classification_loss,
localization_loss=localization_loss,
classification_loss_weight=classification_loss_weight,
localization_loss_weight=localization_loss_weight,
normalize_loss_by_num_matches=normalize_loss_by_num_matches,
hard_example_miner=hard_example_miner,
target_assigner_instance=target_assigner_instance,
add_summaries=False,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
freeze_batchnorm=False,
inplace_batchnorm_update=False,
add_background_class=add_background_class,
random_example_sampler=random_example_sampler,
expected_loss_weights_fn=expected_loss_weights_fn,
**kwargs)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def _get_value_for_matching_key(self, dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_mobilenet_v2_feature_extractor_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_feature_extractor."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_feature_extractor
from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor
slim = tf.contrib.slim
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
if use_keras:
return (ssd_mobilenet_v2_keras_feature_extractor.
SSDMobileNetV2KerasFeatureExtractor(
is_training=False,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
name='MobilenetV2'))
else:
return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def test_extract_features_returns_correct_shapes_128(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_128_explicit_padding(
self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_299(self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_keras):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=use_keras)
def test_preprocess_returns_correct_value_range(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=use_keras)
def test_variable_count(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=use_keras)
self.assertEqual(len(variables), 292)
def test_has_fused_batchnorm(self, use_keras):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
if use_keras:
_ = feature_extractor(preprocessed_image)
else:
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any(op.type == 'FusedBatchNorm'
for op in tf.get_default_graph().get_operations()))
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechRecognition/QuartzNet/utils | utils | preprocessing_utils | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
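"""Convert a single utterance to .wav with sox, optionally adding speed perturbation.
Args:
    data: dict with 'input_relpath', 'input_fname' and 'transcript' entries.
    input_dir: root directory of the original audio files.
    dest_dir: root directory for the converted .wav files.
    target_sr: target sample rate; defaults to the input file's sample rate.
    speed: optional list of speed perturbation factors; 1 is always included.
    overwrite: if True, regenerate output files even if they already exist.
Returns:
    dict with the lower-cased 'transcript', per-file sox metadata under 'files',
    and 'original_duration' / 'original_num_samples' of the unperturbed output.
"""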
speed = speed or []
speed.append(1)
speed = list(set(speed)) # Make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s==1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess,
input_dir=input_dir, dest_dir=dest_dir,
target_sr=target_sr, speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/evaluation | evaluation | evaluation_TF32_A100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python3 main.py --cfg config/efficientnet_v1/b0_cfg.py \
--mode eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--eval_batch_size 512
|
PyTorch/Recommendation/NCF | NCF | dataloading | # Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME, TEST_SAMPLES_PER_SERIES
class TorchTensorDataset:
""" Warning! This dataset/loader uses torch.load. Torch.load implicitly uses pickle. Pickle is insecure.
It is trivial to achieve arbitrary code execution using a prepared pickle payload. Only unpickle data you trust."""
def __init__(self, feature_spec: FeatureSpec, mapping_name: str, args):
self.local_rank = args.local_rank
self.mapping_name = mapping_name
self.features = dict()
self.feature_spec = feature_spec
self._load_features()
def _load_features(self):
chunks = self.feature_spec.source_spec[self.mapping_name]
for chunk in chunks:
assert chunk['type'] == 'torch_tensor', "Only torch_tensor files supported in this loader"
files_list = chunk['files']
assert len(files_list) == 1, "Only one file per chunk supported in this loader"
file_relative_path = files_list[0]
path_to_load = os.path.join(self.feature_spec.base_directory, file_relative_path)
chunk_data = torch.load(path_to_load, map_location=torch.device('cuda:{}'.format(self.local_rank)))
running_pos = 0
for feature_name in chunk['features']:
next_running_pos = running_pos + 1
feature_data = chunk_data[:, running_pos:next_running_pos]
# This is needed because slicing instead of indexing keeps the data 2-dimensional
feature_data = feature_data.reshape(-1, 1)
running_pos = next_running_pos
self.features[feature_name] = feature_data
class TestDataLoader:
def __init__(self, dataset: TorchTensorDataset, args):
self.dataset = dataset
self.feature_spec = dataset.feature_spec
self.channel_spec = self.feature_spec.channel_spec
self.samples_in_series = self.feature_spec.metadata[TEST_SAMPLES_PER_SERIES]
self.raw_dataset_length = None # First feature loaded sets this. Total length before splitting across cards
self.data = dict()
self.world_size = args.world_size
self.local_rank = args.local_rank
self.batch_size = args.valid_batch_size
self._build_channel_dict()
self._deduplication_augmentation()
self._split_between_devices()
self._split_into_batches()
def _build_channel_dict(self):
for channel_name, channel_features in self.channel_spec.items():
channel_tensors = dict()
for feature_name in channel_features:
channel_tensors[feature_name] = self.dataset.features[feature_name]
if not self.raw_dataset_length:
self.raw_dataset_length = channel_tensors[feature_name].shape[0]
else:
assert self.raw_dataset_length == channel_tensors[feature_name].shape[0]
self.data[channel_name] = channel_tensors
def _deduplication_augmentation(self):
# Augmentation
# This adds a duplication mask tensor.
# This is here to exactly replicate the MLPerf training regime. Moving this deduplication to the candidate item
# generation stage increases the real diversity of the candidates, which makes the ranking task harder
# and results in a drop in HR@10 of approx 0.01. This has been deemed unacceptable (May 2021).
# We need the duplication mask to determine if a given item should be skipped during ranking
# If an item with label 1 is duplicated in the sampled ones, we need to be careful to not mark the one with
# label 1 as a duplicate. If an item appears repeatedly only with label 1, no duplicates are marked.
# To easily compute candidates, we sort the items. This will impact the distribution of examples between
# devices, but should not influence the numerics or performance meaningfully.
# We need to ensure that the positive item, which we don't want to mark as a duplicate, appears first.
# We do this by adding labels as a secondary factor
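# A worked example for a single user: items [5, 5, 3] with labels [1, 0, 0]
# give sorting weights [4.5, 5.0, 3.0], so the row sorts to items [3, 5, 5]
# with labels [0, 1, 0]. The resulting dup_mask is [False, False, True]: the
# negative copy of item 5 is marked as a duplicate, while its positive copy,
# which sorted first, is kept for ranking.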
# Reshape the tensors to have items for a given user in a single row
user_feature_name = self.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = self.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = self.channel_spec[LABEL_CHANNEL_NAME][0]
self.ignore_mask_channel_name = 'mask_ch'
self.ignore_mask_feature_name = 'mask'
items = self.data[ITEM_CHANNEL_NAME][item_feature_name].view(-1, self.samples_in_series)
users = self.data[USER_CHANNEL_NAME][user_feature_name].view(-1, self.samples_in_series)
labels = self.data[LABEL_CHANNEL_NAME][label_feature_name].view(-1, self.samples_in_series)
sorting_weights = items.float() - labels.float() * 0.5
_, indices = torch.sort(sorting_weights)
# The gather reorders according to the indices decided by the sort above
sorted_items = torch.gather(items, 1, indices)
sorted_labels = torch.gather(labels, 1, indices)
sorted_users = torch.gather(users, 1, indices)
dup_mask = sorted_items[:, 0:-1] == sorted_items[:, 1:] # This says if a given item is equal to the next one
dup_mask = dup_mask.type(torch.bool)
# The first item for a given user can never be a duplicate:
dup_mask = torch.cat((torch.zeros_like(dup_mask[:, 0:1]), dup_mask), dim=1)
# Reshape them back
self.data[ITEM_CHANNEL_NAME][item_feature_name] = sorted_items.view(-1, 1)
self.data[USER_CHANNEL_NAME][user_feature_name] = sorted_users.view(-1, 1)
self.data[LABEL_CHANNEL_NAME][label_feature_name] = sorted_labels.view(-1, 1)
self.data[self.ignore_mask_channel_name] = dict()
self.data[self.ignore_mask_channel_name][self.ignore_mask_feature_name] = dup_mask.view(-1, 1)
def _split_between_devices(self):
if self.world_size > 1:
# DO NOT REPLACE WITH torch.chunk (number of returned chunks can silently be lower than requested).
# It would break compatibility with small datasets.
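# For example, torch.arange(5).chunk(4) silently returns only 3 chunks
# (sizes 2, 2, 1), while torch.arange(5).split([2, 1, 1, 1]) always yields
# exactly 4 pieces, one per device.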
num_test_cases = self.raw_dataset_length / self.samples_in_series
smaller_batch = (int(num_test_cases // self.world_size)) * self.samples_in_series
bigger_batch = smaller_batch + self.samples_in_series
remainder = int(num_test_cases % self.world_size)
samples_per_card = [bigger_batch] * remainder + [smaller_batch] * (self.world_size - remainder)
for channel_name, channel_dict in self.data.items():
for feature_name, feature_tensor in channel_dict.items():
channel_dict[feature_name] = \
channel_dict[feature_name].split(samples_per_card)[self.local_rank]
def _split_into_batches(self):
self.batches = None
# This is the structure of each batch, waiting to be copied and filled in with data
for channel_name, channel_dict in self.data.items():
for feature_name, feature_tensor in channel_dict.items():
feature_batches = feature_tensor.view(-1).split(self.batch_size)
if not self.batches:
self.batches = list(
{channel_name: dict() for channel_name in self.data.keys()} for _ in feature_batches)
for pos, feature_batch_data in enumerate(feature_batches):
self.batches[pos][channel_name][feature_name] = feature_batch_data
def get_epoch_data(self):
return self.batches
def get_ignore_mask(self):
return self.data[self.ignore_mask_channel_name][self.ignore_mask_feature_name]
class TrainDataloader:
def __init__(self, dataset: TorchTensorDataset, args):
self.dataset = dataset
self.local_rank = args.local_rank
if args.distributed:
self.local_batch = args.batch_size // args.world_size
else:
self.local_batch = args.batch_size
self.feature_spec = dataset.feature_spec
self.channel_spec = self.feature_spec.channel_spec
self.negative_samples = args.negative_samples
self.data = dict()
self.raw_dataset_length = None # first feature loaded sets this
self._build_channel_dict()
self.length_after_augmentation = self.raw_dataset_length * (self.negative_samples + 1)
samples_per_worker = self.length_after_augmentation / args.world_size
self.samples_begin = int(samples_per_worker * args.local_rank)
self.samples_end = int(samples_per_worker * (args.local_rank + 1))
def _build_channel_dict(self):
for channel_name, channel_features in self.channel_spec.items():
channel_tensors = dict()
for feature_name in channel_features:
channel_tensors[feature_name] = self.dataset.features[feature_name]
if not self.raw_dataset_length:
self.raw_dataset_length = channel_tensors[feature_name].shape[0]
else:
assert self.raw_dataset_length == channel_tensors[feature_name].shape[0]
self.data[channel_name] = channel_tensors
def get_epoch_data(self):
# Augment, appending args.negative_samples times the original set, now with random items and negative labels
augmented_data = {channel_name: dict() for channel_name in self.data.keys()}
user_feature_name = self.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = self.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = self.channel_spec[LABEL_CHANNEL_NAME][0]
# USER
user_tensor = self.data[USER_CHANNEL_NAME][user_feature_name]
neg_users = user_tensor.repeat(self.negative_samples, 1)
augmented_users = torch.cat((user_tensor, neg_users))
augmented_data[USER_CHANNEL_NAME][user_feature_name] = augmented_users
del neg_users
# ITEM
item_tensor = self.data[ITEM_CHANNEL_NAME][item_feature_name]
neg_items = torch.empty_like(item_tensor).repeat(self.negative_samples, 1) \
.random_(0, self.feature_spec.feature_spec[item_feature_name]['cardinality'])
augmented_items = torch.cat((item_tensor, neg_items))
augmented_data[ITEM_CHANNEL_NAME][item_feature_name] = augmented_items
del neg_items
# LABEL
label_tensor = self.data[LABEL_CHANNEL_NAME][label_feature_name]
neg_label = torch.zeros_like(label_tensor, dtype=torch.float32).repeat(self.negative_samples, 1)
augmented_labels = torch.cat((label_tensor, neg_label))
del neg_label
augmented_data[LABEL_CHANNEL_NAME][label_feature_name] = augmented_labels
# Labels are not shuffled between cards.
# This replicates previous behaviour.
epoch_indices = torch.randperm(self.samples_end - self.samples_begin, device='cuda:{}'.format(self.local_rank))
epoch_indices += self.samples_begin
batches = None
for channel_name, channel_dict in augmented_data.items():
for feature_name, feature_tensor in channel_dict.items():
# the last batch will almost certainly be smaller, drop it
# Warning: may not work if there's only one batch
feature_batches = feature_tensor.view(-1)[epoch_indices].split(self.local_batch)[:-1]
if not batches:
batches = list({channel_name: dict() for channel_name in self.data.keys()} for _ in feature_batches)
for pos, feature_batch_data in enumerate(feature_batches):
batches[pos][channel_name][feature_name] = feature_batch_data
return batches
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow | waveglow | waveGlowInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "waveGlowInstance.h"
#include "blending.h"
#include "cudaUtils.h"
#include "dataShuffler.h"
#include "engineCache.h"
#include "normalDistribution.h"
#include "utils.h"
#include "NvOnnxParser.h"
#include "cuda_runtime.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include <string>
using namespace nvinfer1;
using IParser = nvonnxparser::IParser;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NUM_OVERLAP = 0;
constexpr const char* const MEL_INPUT_NAME = "spectrograms";
constexpr const char* const Z_INPUT_NAME = "z";
constexpr const char* const OUTPUT_NAME = "waveglow_output";
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
WaveGlowInstance::WaveGlowInstance(TRTPtr<ICudaEngine> eng) :
TimedObject("WaveGlowInstance::infer()"),
mStreamingInstance(std::move(eng)),
mFrequency(22050),
mNumOverlap(std::min(NUM_OVERLAP, mStreamingInstance.getMaxOutputLength())),
mIndependentChunkSize(
mStreamingInstance.getMelSpacing()
- (mNumOverlap / getNumberOfSamplesPerFrame())),
mIndependentChunkSampleSize(
mIndependentChunkSize * getNumberOfSamplesPerFrame()),
mNumChunkMels(getMaxBatchSize()),
mNumChunkSamples(getMaxBatchSize()),
mInputFrame(
mStreamingInstance.getMelSpacing()
* mStreamingInstance.getNumMelChannels() * getMaxBatchSize()),
mOutputFrame(
mStreamingInstance.getRequiredOutputBufferSize(getMaxBatchSize()))
{
if (mIndependentChunkSampleSize + mNumOverlap != mStreamingInstance.getMaxOutputLength())
{
throw std::runtime_error("Overlap size must be a multiple of the number of samples per frame: "
+ std::to_string(mNumOverlap) + "/" + std::to_string(getNumberOfSamplesPerFrame()));
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void WaveGlowInstance::infer(const int batchSize, const float* const melsDevice, const int melSpacing,
const int* const numMels, const int numMaxSamples, float* outputDevice, int* numSamplesOut)
{
startTiming();
cudaStream_t stream;
if (cudaStreamCreate(&stream) != cudaSuccess)
{
throw std::runtime_error("Failed to create stream.");
}
mStreamingInstance.startInference(batchSize, stream);
// compute maximum number of chunks we'll need
int maxNumMels = 0;
for (int i = 0; i < batchSize; ++i)
{
if (numMels[i] > maxNumMels)
{
maxNumMels = numMels[i];
}
}
// make sure the number of chunks will not exceed any of our buffers
const int numChunks = std::min(
Utils::roundUpDiv(maxNumMels, mIndependentChunkSize),
numMaxSamples / mIndependentChunkSampleSize);
const int totalChunkSize = mStreamingInstance.getMelSpacing() * mStreamingInstance.getNumMelChannels();
for (int i = 0; i < batchSize; ++i)
{
numSamplesOut[i] = 0;
}
for (int chunk = 0; chunk < numChunks; ++chunk)
{
const int inputIdx = chunk * mIndependentChunkSize;
DataShuffler::frameTransfer(
melsDevice,
mInputFrame.data(),
melSpacing * mStreamingInstance.getNumMelChannels(),
inputIdx * mStreamingInstance.getNumMelChannels(),
totalChunkSize,
batchSize,
totalChunkSize,
0,
stream);
for (int i = 0; i < batchSize; ++i)
{
mNumChunkMels[i] = std::min(mStreamingInstance.getMelSpacing(), std::max(0, numMels[i] - inputIdx));
}
mStreamingInstance.inferNext(
stream,
mInputFrame.data(),
mNumChunkMels.data(),
mOutputFrame.data(),
mNumChunkSamples.data());
Blending::linear(
batchSize,
mOutputFrame.data(),
outputDevice,
mStreamingInstance.getOutputSpacing(),
mNumOverlap,
numMaxSamples,
chunk * mIndependentChunkSampleSize,
stream);
for (int i = 0; i < batchSize; ++i)
{
numSamplesOut[i] += mNumChunkSamples[i];
}
CudaUtils::sync(stream);
}
cudaStreamDestroy(stream);
stopTiming();
}
int WaveGlowInstance::getNumberOfSamplesPerFrame() const
{
return mStreamingInstance.getNumberOfSamplesPerFrame();
}
int WaveGlowInstance::getFrequency() const
{
return mFrequency;
}
int WaveGlowInstance::getMaxBatchSize() const
{
return mStreamingInstance.getMaxBatchSize();
}
} // namespace tts
|
TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils | utils | arguments | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
DEFAULT_DIR = "/outbrain"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path",
help="Path with the data required for NVTabular preprocessing. "
"If stats already exists under metadata_path preprocessing phase will be skipped.",
type=str,
default=f"{DEFAULT_DIR}/orig",
nargs="+",
)
parser.add_argument(
"--metadata_path",
help="Path with preprocessed NVTabular stats",
type=str,
default=f"{DEFAULT_DIR}/data",
nargs="+",
)
parser.add_argument(
"--use_dask",
default=False,
action="store_true",
help="Use multi-gpu preprocessing for nvTabular workflow",
)
return parser.parse_args()
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | running_on_cloud | # Running on Google Cloud ML Engine
The Tensorflow Object Detection API supports distributed training on Google
Cloud ML Engine. This section documents instructions on how to train and
evaluate your model using Cloud ML. The reader should complete the following
prerequisites:
1. The reader has created and configured a project on Google Cloud Platform.
See [the Cloud ML quick start guide](https://cloud.google.com/ml-engine/docs/quickstarts/command-line).
2. The reader has installed the Tensorflow Object Detection API as documented
in the [installation instructions](installation.md).
3. The reader has a valid data set and stored it in a Google Cloud Storage
bucket. See [this page](preparing_inputs.md) for instructions on how to generate
a dataset for the PASCAL VOC challenge or the Oxford-IIIT Pet dataset.
4. The reader has configured a valid Object Detection pipeline, and stored it
in a Google Cloud Storage bucket. See [this page](configuring_jobs.md) for
details on how to write a pipeline configuration.
Additionally, it is recommended that users test their job by running training and
evaluation jobs for a few iterations
[locally on their own machines](running_locally.md).
## Packaging
In order to run the Tensorflow Object Detection API on Cloud ML, it must be
packaged (along with its TF-Slim dependency and the
[pycocotools](https://github.com/cocodataset/cocoapi/tree/master/PythonAPI/pycocotools)
library). The required packages can be created with the following command
``` bash
# From tensorflow/models/research/
bash object_detection/dataset_tools/create_pycocotools_package.sh /tmp/pycocotools
python setup.py sdist
(cd slim && python setup.py sdist)
```
This will create python packages dist/object_detection-0.1.tar.gz,
slim/dist/slim-0.1.tar.gz, and /tmp/pycocotools/pycocotools-2.0.tar.gz.
## Running a Multiworker (GPU) Training Job on CMLE
Google Cloud ML requires a YAML configuration file for a multiworker training
job using GPUs. A sample YAML file is given below:
```
trainingInput:
runtimeVersion: "1.9"
scaleTier: CUSTOM
masterType: standard_gpu
workerCount: 9
workerType: standard_gpu
parameterServerCount: 3
parameterServerType: standard
```
Please keep the following guidelines in mind when writing the YAML
configuration:
* A job with n workers will have n + 1 training machines (n workers + 1 master).
* The number of parameter servers used should be an odd number to prevent
a parameter server from storing only weight variables or only bias variables
(due to round robin parameter scheduling); see the sanity-check sketch after
this list.
* The learning rate in the training config should be decreased when using a
larger number of workers. Some experimentation is required to find the
optimal learning rate.
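As a quick sanity check of the first two guidelines, the minimal sketch below
(not part of the Object Detection API) recomputes the machine count and verifies
that the parameter server count is odd; the hard-coded values mirror the sample
YAML above and should be replaced with your own settings.
```python
# Sanity-check a CMLE worker configuration against the guidelines above.
worker_count = 9             # workerCount from the sample YAML
parameter_server_count = 3   # parameterServerCount from the sample YAML
total_training_machines = worker_count + 1  # n workers + 1 master
if parameter_server_count % 2 == 0:
    raise ValueError("Use an odd number of parameter servers so that no server "
                     "stores only weight variables or only bias variables.")
print("Training machines (including master):", total_training_machines)
```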
The YAML file should be saved on the local machine (not on GCP). Once it has
been written, a user can start a training job on Cloud ML Engine using the
following command:
```bash
# From tensorflow/models/research/
gcloud ml-engine jobs submit training object_detection_`date +%m_%d_%Y_%H_%M_%S` \
--runtime-version 1.9 \
--job-dir=gs://${MODEL_DIR} \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \
--module-name object_detection.model_main \
--region us-central1 \
--config ${PATH_TO_LOCAL_YAML_FILE} \
-- \
--model_dir=gs://${MODEL_DIR} \
--pipeline_config_path=gs://${PIPELINE_CONFIG_PATH}
```
Where `${PATH_TO_LOCAL_YAML_FILE}` is the local path to the YAML configuration,
`gs://${MODEL_DIR}` specifies the directory on Google Cloud Storage where the
training checkpoints and events will be written to and
`gs://${PIPELINE_CONFIG_PATH}` points to the pipeline configuration stored on
Google Cloud Storage.
Users can monitor the progress of their training job on the [ML Engine
Dashboard](https://console.cloud.google.com/mlengine/jobs).
Note: This sample is supported for use with 1.9 runtime version.
## Running a TPU Training Job on CMLE
Launching a training job with a TPU compatible pipeline config requires using a
similar command:
```bash
gcloud ml-engine jobs submit training `whoami`_object_detection_`date +%m_%d_%Y_%H_%M_%S` \
--job-dir=gs://${MODEL_DIR} \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \
--module-name object_detection.model_tpu_main \
--runtime-version 1.9 \
--scale-tier BASIC_TPU \
--region us-central1 \
-- \
--tpu_zone us-central1 \
--model_dir=gs://${MODEL_DIR} \
--pipeline_config_path=gs://${PIPELINE_CONFIG_PATH}
```
In contrast with the GPU training command, there is no need to specify a YAML
file and we point to the *object_detection.model_tpu_main* binary instead of
*object_detection.model_main*. We must also now set `scale-tier` to be
`BASIC_TPU` and provide a `tpu_zone`. Finally, as before, `pipeline_config_path`
points to the pipeline configuration stored on Google Cloud Storage
(but it must now be a TPU compatible model).
## Running an Evaluation Job on CMLE
Note: You only need to do this when using TPUs for training, since TPU training
does not interleave evaluation during training the way multiworker GPU training
does.
Evaluation jobs run on a single machine, so it is not necessary to write a YAML
configuration for evaluation. Run the following command to start the evaluation
job:
```bash
gcloud ml-engine jobs submit training object_detection_eval_`date +%m_%d_%Y_%H_%M_%S` \
--runtime-version 1.9 \
--job-dir=gs://${MODEL_DIR} \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz,/tmp/pycocotools/pycocotools-2.0.tar.gz \
--module-name object_detection.model_main \
--region us-central1 \
--scale-tier BASIC_GPU \
-- \
--model_dir=gs://${MODEL_DIR} \
--pipeline_config_path=gs://${PIPELINE_CONFIG_PATH} \
--checkpoint_dir=gs://${MODEL_DIR}
```
Where `gs://${MODEL_DIR}` points to the directory on Google Cloud Storage where
training checkpoints are saved (same as the training job), as well as
to where evaluation events will be saved on Google Cloud Storage and
`gs://${PIPELINE_CONFIG_PATH}` points to where the pipeline configuration is
stored on Google Cloud Storage.
Typically one starts an evaluation job concurrently with the training job.
Note that we do not support running evaluation on TPU, so the above command
line for launching evaluation jobs is the same whether you are training
on GPU or TPU.
## Running Tensorboard
You can run Tensorboard locally on your own machine to view progress of your
training and eval jobs on Google Cloud ML. Run the following command to start
Tensorboard:
``` bash
tensorboard --logdir=gs://${YOUR_CLOUD_BUCKET}
```
Note that it may take Tensorboard a few minutes to populate with results.
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2 | tacotron2 | symbols | """ from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify _characters.
See TRAINING_DATA.md for details.
'''
from utils.tacotron2 import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_AMP_EVAL_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet evaluation with AMP on 1 GPU using a batch size of 16
# Usage ./UNet_AMP_EVAL_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='evaluate' \
--iter_unit='epoch' \
--num_iter=1 \
--batch_size=16 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=50 \
--debug_verbosity=0
|
PyTorch/LanguageModeling/BERT/triton/runner | runner | task | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
size: RAM memory size in bytes
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
name: Name of checkpoint
url: URL of the checkpoint
path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
logs_dir: pathlib.Path = pathlib.Path("/var/logs"),
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operations library path
triton_load_model_method: Method used to load models on Triton
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
self.datasets_dir = pathlib.Path(datasets_dir) if datasets_dir is not None else None
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.logs_dir = logs_dir
def start(self) -> None:
"""
Update task execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update task execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
|
TensorFlow2/Recommendation/WideAndDeep | WideAndDeep | .gitignore | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.idea/
*.tar
.ipynb_checkpoints
/_python_build
*.pyc
__pycache__
|
PyTorch/SpeechSynthesis/Tacotron2/scripts | scripts | prepare_mels | #!/usr/bin/env bash
set -e
DATADIR="LJSpeech-1.1"
FILELISTSDIR="filelists"
TESTLIST="$FILELISTSDIR/ljs_audio_text_test_filelist.txt"
TRAINLIST="$FILELISTSDIR/ljs_audio_text_train_filelist.txt"
VALLIST="$FILELISTSDIR/ljs_audio_text_val_filelist.txt"
TESTLIST_MEL="$FILELISTSDIR/ljs_mel_text_test_filelist.txt"
TRAINLIST_MEL="$FILELISTSDIR/ljs_mel_text_train_filelist.txt"
VALLIST_MEL="$FILELISTSDIR/ljs_mel_text_val_filelist.txt"
mkdir -p "$DATADIR/mels"
if [ $(ls $DATADIR/mels | wc -l) -ne 13100 ]; then
python preprocess_audio2mel.py --wav-files "$TRAINLIST" --mel-files "$TRAINLIST_MEL"
python preprocess_audio2mel.py --wav-files "$TESTLIST" --mel-files "$TESTLIST_MEL"
python preprocess_audio2mel.py --wav-files "$VALLIST" --mel-files "$VALLIST_MEL"
fi
|
PyTorch/Classification/GPUNet/triton/065ms/runner | runner | start_NVIDIA-DGX-1-(1x-V100-32GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.065ms.runner.__main__" \
--config-path "triton/065ms/runner/config_NVIDIA-DGX-1-(1x-V100-32GB).yaml" \
--device 0 |
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai | conversationalai | README |
## Model Preparation
### Clone the repository
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples
```
You will build the Conversational AI demo in the Tacotron2 folder:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai
```
### Download checkpoints
Download the PyTorch checkpoints from [NGC](https://ngc.nvidia.com/models):
* [Jasper](https://ngc.nvidia.com/catalog/models/nvidia:jasperpyt_fp16/files)
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/jasperpyt_fp16/versions/1/files/jasper_fp16.pt
```
* [BERT](https://ngc.nvidia.com/catalog/models/nvidia:bert_large_pyt_amp_ckpt_squad_qa1_1/files?version=1)
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/bert_large_pyt_amp_ckpt_squad_qa1_1/versions/1/files/bert_large_qa.pt
```
* [Tacotron 2](https://ngc.nvidia.com/catalog/models/nvidia:tacotron2_pyt_ckpt_amp/files?version=19.12.0)
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_pyt_ckpt_amp/versions/19.12.0/files/nvidia_tacotron2pyt_fp16.pt
```
* [WaveGlow](https://ngc.nvidia.com/catalog/models/nvidia:waveglow_ckpt_amp_256/files?version=20.01.0)
```bash
wget https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_amp_256/versions/20.01.0/files/nvidia_waveglow256pyt_fp16.pt
```
Move the downloaded checkpoints to the `models` directory:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai
```
### Prepare Jasper
First, let's generate a TensorRT engine for Jasper using TensorRT version 7.
Download the Jasper checkpoint from [NGC](https://ngc.nvidia.com/catalog/models/nvidia:jasperpyt_fp16/files)
and move it to the `Jasper/checkpoints/` directory:
```bash
mkdir -p DeepLearningExamples/PyTorch/SpeechRecognition/Jasper/checkpoints
mv jasper_fp16.pt DeepLearningExamples/PyTorch/SpeechRecognition/Jasper/checkpoints
```
Apply a patch to enable support of TensorRT 7:
```bash
cd DeepLearningExamples/
git apply --ignore-space-change --reject --whitespace=fix ../patch_jasper_trt7
```
Now, build a container for Jasper:
```bash
cd DeepLearningExamples/PyTorch/SpeechRecognition/Jasper/
bash tensorrt/scripts/docker/build.sh
```
To run the container, type:
```bash
cd DeepLearningExamples/PyTorch/SpeechRecognition/Jasper
export JASPER_DIR=${PWD}
export DATA_DIR=$JASPER_DIR/data/
export CHECKPOINT_DIR=$JASPER_DIR/checkpoints/
export RESULT_DIR=$JASPER_DIR/results/
cd $JASPER_DIR
mkdir -p $DATA_DIR $CHECKPOINT_DIR $RESULT_DIR
bash tensorrt/scripts/docker/launch.sh $DATA_DIR $CHECKPOINT_DIR $RESULT_DIR
```
Inside the container, export the Jasper TensorRT engine by executing:
```bash
pip install --upgrade onnx
mkdir -p /results/onnxs/ /results/engines/
cd /jasper
python tensorrt/perf.py --batch_size 1 --engine_batch_size 1 --model_toml configs/jasper10x5dr_nomask.toml --ckpt_path /checkpoints/jasper_fp16.pt --trt_fp16 --pyt_fp16 --engine_path /results/engines/jasper_fp16.engine --onnx_path /results/onnxs/fp32_DYNAMIC.onnx --seq_len 3600 --make_onnx
```
After successful export, copy the engine to model_repo:
```bash
cd DeepLearningExamples/PyTorch
mkdir -p SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/jasper-trt/1
cp SpeechRecognition/Jasper/results/engines/jasper_fp16.engine SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/jasper-trt/1/
```
You will also need the Jasper feature extractor and decoder. Download them from [NGC](https://ngc.nvidia.com/catalog/models/nvidia:jasperpyt_jit_fp16/files) and move them to the model_repo:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/
mkdir -p jasper-decoder/1 jasper-feature-extractor/1
wget -P jasper-decoder/ https://api.ngc.nvidia.com/v2/models/nvidia/jasperpyt_jit_fp16/versions/1/files/jasper-decoder/config.pbtxt
wget -P jasper-decoder/1/ https://api.ngc.nvidia.com/v2/models/nvidia/jasperpyt_jit_fp16/versions/1/files/jasper-decoder/1/jasper-decoder.pt
wget -P jasper-feature-extractor/ https://api.ngc.nvidia.com/v2/models/nvidia/jasperpyt_jit_fp16/versions/1/files/jasper-feature-extractor/config.pbtxt
wget -P jasper-feature-extractor/1/ https://api.ngc.nvidia.com/v2/models/nvidia/jasperpyt_jit_fp16/versions/1/files/jasper-feature-extractor/1/jasper-feature-extractor.pt
```
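At this point the Jasper part of the model repository should contain the TensorRT engine together with the TorchScript decoder and feature extractor. A quick sanity check (a sketch; the directory names are assumptions taken from the commands above):
```bash
# Run from the conversationalai directory; directory names are assumed from the steps above
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai
find model_repo/jasper-trt model_repo/jasper-decoder model_repo/jasper-feature-extractor -maxdepth 2
```
Each entry should show a `config.pbtxt` (where one was downloaded) and a populated version directory such as `1/`.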
### Prepare BERT
With the generated Jasper model, we can proceed to BERT.
Download the BERT checkpoint from [NGC](https://ngc.nvidia.com/catalog/models/nvidia:bert_large_pyt_amp_ckpt_squad_qa1_1/files)
and move it to the `BERT/checkpoints/` directory:
```bash
mkdir -p DeepLearningExamples/PyTorch/LanguageModeling/BERT/checkpoints/
mv bert_large_qa.pt DeepLearningExamples/PyTorch/LanguageModeling/BERT/checkpoints/bert_qa.pt
```
Now, build a container for BERT:
```bash
cd PyTorch/LanguageModeling/BERT/
bash scripts/docker/build.sh
```
Use the Triton export script to convert the model `checkpoints/bert_large_qa.pt` to ONNX:
```bash
bash triton/export_model.sh
```
The exported model will be saved in `results/triton_models/` (for example, `bertQA-ts-script`), together with the Triton configuration file. Copy the model and configuration file to the model_repo:
```bash
cd DeepLearningExamples
cp -r PyTorch/LanguageModeling/BERT/results/triton_models/bertQA-ts-script DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/
```
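You can verify that the copied BERT entry contains both a `config.pbtxt` and a populated version directory (the directory name below follows the `cp` command above and is an assumption):
```bash
# Inspect the copied BERT model entry (name assumed from the cp command above)
ls -R DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/model_repo/bertQA-ts-script
```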
### Prepare Tacotron 2 and WaveGlow
Now to the final part: the TTS system.
Download the [Tacotron 2](https://ngc.nvidia.com/models/nvidia:tacotron2pyt_fp16/files?version=2) and [WaveGlow](https://ngc.nvidia.com/models/nvidia:waveglow256pyt_fp16/files) checkpoints from [NGC](https://ngc.nvidia.com/catalog/models/)
and move them to the `Tacotron2/checkpoints/` directory:
```bash
mkdir -p DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/checkpoints/
mv nvidia_tacotron2pyt_fp16.pt nvidia_waveglow256pyt_fp16.pt DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/checkpoints/
```
Build the Tacotron 2 container:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/
bash scripts/docker/build.sh
```
Run the container in interactive mode by typing:
```bash
bash scripts/docker/interactive.sh
```
Export Tacotron 2 to TorchScript:
```bash
cd /workspace/tacotron2/
mkdir -p output
python notebooks/conversationalai/export_tacotron2_ts.py --tacotron2 notebooks/conversationalai/nvidia_tacotron2pyt_fp16.pt -o output/tacotron2_fp16.pt --fp16
```
Export WaveGlow to ONNX intermediate representation:
```bash
python tensorrt/convert_waveglow2onnx.py --waveglow notebooks/conversationalai/nvidia_waveglow256pyt_fp16.pt --wn-channels 256 --fp16 -o output/ --config-file config.json
```
Use the exported ONNX IR to generate a TensorRT engine:
```bash
pip install pycuda
python tensorrt/convert_onnx2trt.py --waveglow output/waveglow.onnx -o output/ --fp16
```
After successful export, exit the container and copy the Tacotron 2 model and the WaveGlow engine to `model_repo`:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/
mkdir -p notebooks/conversationalai/model_repo/tacotron2/1/ notebooks/conversationalai/model_repo/waveglow-trt/1/
cp output/tacotron2_fp16.pt notebooks/conversationalai/model_repo/tacotron2/1/
cp output/waveglow_fp16.engine notebooks/conversationalai/model_repo/waveglow-trt/1/
```
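Before moving on, it can help to check that every model in the repository has at least one numbered version directory with a model file inside. The listing below is only a sketch; the exact set of entries depends on which of the previous steps you completed:
```bash
# Show each model entry and its version directories (sketch; run from the conversationalai directory)
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai
find model_repo -mindepth 1 -maxdepth 2 | sort
```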
## Deployment
With all models ready for deployment, go to the `conversationalai/client` folder and build the Triton client:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client
docker build -f Dockerfile --network=host -t speech_ai_client:demo .
```
From a terminal, start the Triton server:
```bash
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai
NV_GPU=1 nvidia-docker run --ipc=host --network=host --rm -p8000:8000 -p8001:8001 -v ${PWD}/model_repo/:/models nvcr.io/nvidia/tritonserver:20.06-v1-py3 tritonserver --model-store=/models --log-verbose 1
```
In another terminal, run the client:
```bash
docker run -it --rm --network=host --device /dev/snd:/dev/snd speech_ai_client:demo bash /workspace/speech_ai_demo/start_jupyter.sh
```
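Once the server reports that all models are ready, you can confirm its status from the host. The endpoints below assume the v1 HTTP API exposed by the `tritonserver:20.06-v1-py3` image on port 8000:
```bash
# Liveness/readiness and model status checks (assumes the v1 HTTP API on port 8000)
curl -s localhost:8000/api/health/ready && echo "server ready"
curl -s localhost:8000/api/status | head -n 40
```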
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow | waveglow | waveGlowLoader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_WAVEGLOWLOADER_H
#define TT2I_WAVEGLOWLOADER_H
#include "waveGlowInstance.h"
#include <memory>
#include <string>
// forward declaration
namespace nvinfer1
{
class ILogger;
class IBuilder;
} // namespace nvinfer1
namespace tts
{
class EngineCache;
class WaveGlowLoader
{
public:
/**
* @brief Load a new WaveGlowInstance from an engine file or a ONNX file. If
* an ONNX file is loaded, build and save the engine to same path with a
* `.eng` suffix.
*
* @param cache The engine cache for loading/saving the engine file.
* @param builder The TRT builder to use.
* @param logger The logger to use.
* @param filename The engine file or ONNX file to load.
* @param fp16 If building an engine from an ONNX file, whether or not to
* allow operations to be performed using fp16. If loading an engine file,
* this input is ignored.
* @param batchSize If building an engine from an ONNX file, the maximum
* batch size to support. If loading an engine file,
* this input is ignored.
*
* @return The instantiated WaveGlowInstance.
*/
static std::shared_ptr<WaveGlowInstance> load(EngineCache& cache, nvinfer1::IBuilder& builder,
std::shared_ptr<nvinfer1::ILogger> logger, const std::string& filename, bool fp16 = true, int batchSize = 8);
};
} // namespace tts
#endif
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs | feature_specs | 10_num | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100001
dtype: int32
cat_10.bin:
cardinality: 100010
dtype: int32
cat_11.bin:
cardinality: 100011
dtype: int32
cat_12.bin:
cardinality: 100012
dtype: int32
cat_13.bin:
cardinality: 100013
dtype: int32
cat_14.bin:
cardinality: 100014
dtype: int32
cat_15.bin:
cardinality: 100015
dtype: int32
cat_16.bin:
cardinality: 100016
dtype: int32
cat_17.bin:
cardinality: 100017
dtype: int32
cat_18.bin:
cardinality: 100018
dtype: int32
cat_19.bin:
cardinality: 100019
dtype: int32
cat_2.bin:
cardinality: 100002
dtype: int32
cat_20.bin:
cardinality: 100020
dtype: int32
cat_21.bin:
cardinality: 100021
dtype: int32
cat_22.bin:
cardinality: 100022
dtype: int32
cat_23.bin:
cardinality: 100023
dtype: int32
cat_24.bin:
cardinality: 100024
dtype: int32
cat_25.bin:
cardinality: 100025
dtype: int32
cat_3.bin:
cardinality: 100003
dtype: int32
cat_4.bin:
cardinality: 100004
dtype: int32
cat_5.bin:
cardinality: 100005
dtype: int32
cat_6.bin:
cardinality: 100006
dtype: int32
cat_7.bin:
cardinality: 100007
dtype: int32
cat_8.bin:
cardinality: 100008
dtype: int32
cat_9.bin:
cardinality: 100009
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
PyTorch/SpeechRecognition/Jasper/configs | configs | jasper10x5dr_speedp-offline_speca_nomask | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "Jasper"
labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
input_val:
audio_dataset: &val_dataset
sample_rate: &sample_rate 16000
trim_silence: true
normalize_transcripts: true
filterbank_features: &val_features
normalize: per_feature
sample_rate: *sample_rate
window_size: 0.02
window_stride: 0.01
window: hann
n_filt: &n_filt 64
n_fft: 512
frame_splicing: &frame_splicing 1
dither: 0.00001
pad_align: 16
# For training we keep samples < 16.7s and apply augmentation
input_train:
audio_dataset:
<<: *val_dataset
max_duration: 16.7
ignore_offline_speed_perturbation: false
filterbank_features:
<<: *val_features
max_duration: 16.7
spec_augment:
freq_masks: 2
max_freq: 20
time_masks: 2
max_time: 75
jasper:
encoder:
init: xavier_uniform
in_feats: *n_filt
frame_splicing: *frame_splicing
activation: relu
use_conv_masks: false
blocks:
- &Conv1
filters: 256
repeat: 1
kernel_size: [11]
stride: [2]
dilation: [1]
dropout: 0.2
residual: false
- &B1
filters: 256
repeat: 5
kernel_size: [11]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B1
- &B2
filters: 384
repeat: 5
kernel_size: [13]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B2
- &B3
filters: 512
repeat: 5
kernel_size: [17]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B3
- &B4
filters: 640
repeat: 5
kernel_size: [21]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B4
- &B5
filters: 768
repeat: 5
kernel_size: [25]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B5
- &Conv2
filters: 896
repeat: 1
kernel_size: [29]
stride: [1]
dilation: [2]
dropout: 0.4
residual: false
- &Conv3
filters: &enc_feats 1024
repeat: 1
kernel_size: [1]
stride: [1]
dilation: [1]
dropout: 0.4
residual: false
decoder:
in_feats: *enc_feats
init: xavier_uniform
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets | datasets | coco | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os
import torch
import torchvision
import torch.multiprocessing as mp
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
class COCODataset(torchvision.datasets.coco.CocoDetection):
def __init__(
self, ann_file, root, remove_images_without_annotations, transforms=None
):
super(COCODataset, self).__init__(root, ann_file)
# sort indices for reproducible results
self.ids = sorted(self.ids)
# filter images without detection annotations
if remove_images_without_annotations:
self.ids = [
img_id
for img_id in self.ids
if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0
]
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self._transforms = transforms
def build_target(self, anno, img_size, pin_memory=False):
# filter crowd annotations
# TODO might be better to add an extra field
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
boxes = torch.tensor(boxes, dtype=torch.float32, pin_memory=pin_memory).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img_size, mode="xywh").convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes, dtype=torch.float32, pin_memory=pin_memory)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
masks = SegmentationMask(masks, img_size, pin_memory=pin_memory)
target.add_field("masks", masks)
target = target.clip_to_image(remove_empty=True)
return target
def __getitem__(self, idx):
img = torchvision.io.read_image(self.get_raw_img_info(idx), torchvision.io.image.ImageReadMode.RGB)
target = self.get_target(idx)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target, idx
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
def get_raw_img_info(self, index):
img_id = self.ids[index]
path = self.coco.loadImgs(img_id)[0]['file_name']
return os.path.join(self.root, path)
def get_target(self, index, pin_memory=False):
img_id = self.ids[index]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
anno = self.coco.loadAnns(ann_ids)
img_size = (self.coco.imgs[img_id]["width"], self.coco.imgs[img_id]["height"])
return self.build_target(anno, img_size, pin_memory=pin_memory)
class HybridDataLoader(object):
def __init__(self, cfg, is_train, batch_size, batch_sampler, dataset, collator, transforms, size_divisible):
assert(dataset._transforms is None), "dataset._transforms must be None when hybrid dataloader is selected"
self.batch_size = batch_size
self.length = len(batch_sampler)
self.batch_sampler = iter(batch_sampler)
self.dataset = dataset
self.transforms = transforms
self.size_divisible = size_divisible
def __iter__(self):
return self
def __len__(self):
return self.length
def __next__(self):
images, targets, idxs = [], [], []
for idx in next(self.batch_sampler):
raw_image = torchvision.io.read_image(self.dataset.get_raw_img_info(idx), torchvision.io.image.ImageReadMode.RGB).pin_memory().to(device='cuda', non_blocking=True)
raw_target = self.dataset.get_target(idx, pin_memory=True)
image, target = self.transforms(raw_image, raw_target)
images.append( image )
targets.append( target )
idxs.append( idx )
images = to_image_list(images, self.size_divisible)
return images, targets, idxs |
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data | data | build | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved..
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import bisect
import copy
import logging
import torch.utils.data
from maskrcnn_benchmark.data.datasets.coco import HybridDataLoader
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.imports import import_file
from . import datasets as D
from . import samplers
from .collate_batch import BatchCollator
from .transforms import build_transforms
def build_dataset(dataset_list, transforms, dataset_catalog, is_train=True):
"""
Arguments:
dataset_list (list[str]): Contains the names of the datasets, e.g.,
coco_2014_train, coco_2014_val, etc.
transforms (callable): transforms to apply to each (image, target) sample
dataset_catalog (DatasetCatalog): contains the information on how to
construct a dataset.
is_train (bool): whether to setup the dataset for training or testing
"""
if not isinstance(dataset_list, (list, tuple)):
raise RuntimeError(
"dataset_list should be a list of strings, got {}".format(dataset_list)
)
datasets = []
total_datasets_size = 0
for dataset_name in dataset_list:
data = dataset_catalog.get(dataset_name)
factory = getattr(D, data["factory"])
args = data["args"]
# for COCODataset, we want to remove images without annotations
# during training
if data["factory"] == "COCODataset":
args["remove_images_without_annotations"] = is_train
if data["factory"] == "PascalVOCDataset":
args["use_difficult"] = not is_train
args["transforms"] = transforms
# make dataset from factory
dataset = factory(**args)
total_datasets_size += len(dataset)
datasets.append(dataset)
# for testing, return a list of datasets
if not is_train:
return datasets, total_datasets_size
# for training, concatenate all datasets into a single one
dataset = datasets[0]
if len(datasets) > 1:
dataset = D.ConcatDataset(datasets)
return [dataset], total_datasets_size
def make_data_sampler(dataset, shuffle, distributed):
if distributed:
return samplers.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def _quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info["height"]) / float(img_info["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def make_batch_data_sampler(
dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0
):
if aspect_grouping:
if not isinstance(aspect_grouping, (list, tuple)):
aspect_grouping = [aspect_grouping]
aspect_ratios = _compute_aspect_ratios(dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
batch_sampler = samplers.GroupedBatchSampler(
sampler, group_ids, images_per_batch, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_batch, drop_last=False
)
if num_iters is not None:
batch_sampler = samplers.IterationBasedBatchSampler(
batch_sampler, num_iters, start_iter
)
return batch_sampler
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
num_gpus = get_world_size()
if is_train:
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_gpus == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus
shuffle = True
num_iters = cfg.SOLVER.MAX_ITER
else:
images_per_batch = cfg.TEST.IMS_PER_BATCH
assert (
images_per_batch % num_gpus == 0
), "TEST.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.".format(
images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus
shuffle = False if not is_distributed else True
num_iters = None
start_iter = 0
if images_per_gpu > 1:
logger = logging.getLogger(__name__)
logger.warning(
"When using more than one image per GPU you may encounter "
"an out-of-memory (OOM) error if your GPU does not have "
"sufficient memory. If this happens, you can reduce "
"SOLVER.IMS_PER_BATCH (for training) or "
"TEST.IMS_PER_BATCH (for inference). For training, you must "
"also adjust the learning rate and schedule length according "
"to the linear scaling rule. See for example: "
"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
)
# group images which have similar aspect ratio. In this case, we only
# group in two cases: those with width / height > 1, and the other way around,
# but the code supports more general grouping strategy
aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
hybrid_dataloader = cfg.DATALOADER.HYBRID
paths_catalog = import_file(
"maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG, True
)
DatasetCatalog = paths_catalog.DatasetCatalog
dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
transforms = build_transforms(cfg, is_train)
if hybrid_dataloader:
datasets, epoch_size = build_dataset(dataset_list, None, DatasetCatalog, is_train)
else:
datasets, epoch_size = build_dataset(dataset_list, transforms, DatasetCatalog, is_train)
data_loaders = []
for dataset in datasets:
sampler = make_data_sampler(dataset, shuffle, is_distributed)
batch_sampler = make_batch_data_sampler(
dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
)
collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
if hybrid_dataloader:
data_loader = HybridDataLoader(cfg, is_train,
images_per_gpu, batch_sampler,
dataset, collator,
transforms,
cfg.DATALOADER.SIZE_DIVISIBILITY)
else:
num_workers = cfg.DATALOADER.NUM_WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collator)
data_loaders.append(data_loader)
if is_train:
# during training, a single (possibly concatenated) data_loader is returned
assert len(data_loaders) == 1
iterations_per_epoch = epoch_size // images_per_batch
return data_loaders[0], iterations_per_epoch
return data_loaders
|
PyTorch/SpeechSynthesis/HiFiGAN | HiFiGAN | models | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import sys
import torch
from common.text.symbols import get_symbols, get_pad_idx
from common.utils import DefaultAttrDict, AttrDict
from fastpitch.model import FastPitch
from fastpitch.model_jit import FastPitchJIT
from hifigan.models import Generator
try:
from waveglow.model import WaveGlow
from waveglow import model as glow
from waveglow.denoiser import Denoiser
sys.modules['glow'] = glow
except ImportError:
print("WARNING: Couldn't import WaveGlow")
def parse_model_args(model_name, parser, add_help=False):
if model_name == 'FastPitch':
from fastpitch import arg_parser
return arg_parser.parse_fastpitch_args(parser, add_help)
elif model_name == 'HiFi-GAN':
from hifigan import arg_parser
return arg_parser.parse_hifigan_args(parser, add_help)
elif model_name == 'WaveGlow':
from waveglow.arg_parser import parse_waveglow_args
return parse_waveglow_args(parser, add_help)
else:
raise NotImplementedError(model_name)
def get_model(model_name, model_config, device, bn_uniform_init=False,
forward_is_infer=False, jitable=False):
"""Chooses a model based on name"""
del bn_uniform_init # unused (old name: uniform_initialize_bn_weight)
if model_name == 'FastPitch':
if jitable:
model = FastPitchJIT(**model_config)
else:
model = FastPitch(**model_config)
elif model_name == 'HiFi-GAN':
model = Generator(model_config)
elif model_name == 'WaveGlow':
model = WaveGlow(**model_config)
else:
raise NotImplementedError(model_name)
if forward_is_infer and hasattr(model, 'infer'):
model.forward = model.infer
return model.to(device)
def get_model_config(model_name, args, ckpt_config=None):
""" Get config needed to instantiate the model """
# Mark keys missing in `args` with an object (None is ambiguous)
_missing = object()
args = DefaultAttrDict(lambda: _missing, vars(args))
# `ckpt_config` is loaded from the checkpoint and has the priority
# `model_config` is based on args and fills empty slots in `ckpt_config`
if model_name == 'FastPitch':
model_config = dict(
# io
n_mel_channels=args.n_mel_channels,
# symbols
n_symbols=(len(get_symbols(args.symbol_set))
if args.symbol_set is not _missing else _missing),
padding_idx=(get_pad_idx(args.symbol_set)
if args.symbol_set is not _missing else _missing),
symbols_embedding_dim=args.symbols_embedding_dim,
# input FFT
in_fft_n_layers=args.in_fft_n_layers,
in_fft_n_heads=args.in_fft_n_heads,
in_fft_d_head=args.in_fft_d_head,
in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,
in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,
in_fft_output_size=args.in_fft_output_size,
p_in_fft_dropout=args.p_in_fft_dropout,
p_in_fft_dropatt=args.p_in_fft_dropatt,
p_in_fft_dropemb=args.p_in_fft_dropemb,
# output FFT
out_fft_n_layers=args.out_fft_n_layers,
out_fft_n_heads=args.out_fft_n_heads,
out_fft_d_head=args.out_fft_d_head,
out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,
out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,
out_fft_output_size=args.out_fft_output_size,
p_out_fft_dropout=args.p_out_fft_dropout,
p_out_fft_dropatt=args.p_out_fft_dropatt,
p_out_fft_dropemb=args.p_out_fft_dropemb,
# duration predictor
dur_predictor_kernel_size=args.dur_predictor_kernel_size,
dur_predictor_filter_size=args.dur_predictor_filter_size,
p_dur_predictor_dropout=args.p_dur_predictor_dropout,
dur_predictor_n_layers=args.dur_predictor_n_layers,
# pitch predictor
pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,
pitch_predictor_filter_size=args.pitch_predictor_filter_size,
p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,
pitch_predictor_n_layers=args.pitch_predictor_n_layers,
# pitch conditioning
pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,
# speakers parameters
n_speakers=args.n_speakers,
speaker_emb_weight=args.speaker_emb_weight,
# energy predictor
energy_predictor_kernel_size=args.energy_predictor_kernel_size,
energy_predictor_filter_size=args.energy_predictor_filter_size,
p_energy_predictor_dropout=args.p_energy_predictor_dropout,
energy_predictor_n_layers=args.energy_predictor_n_layers,
# energy conditioning
energy_conditioning=args.energy_conditioning,
energy_embedding_kernel_size=args.energy_embedding_kernel_size,
)
elif model_name == 'HiFi-GAN':
if args.hifigan_config is not None:
assert ckpt_config is None, (
"Supplied --hifigan-config, but the checkpoint has a config. "
"Drop the flag or remove the config from the checkpoint file.")
print(f'HiFi-GAN: Reading model config from {args.hifigan_config}')
with open(args.hifigan_config) as f:
args = AttrDict(json.load(f))
model_config = dict(
# generator architecture
upsample_rates=args.upsample_rates,
upsample_kernel_sizes=args.upsample_kernel_sizes,
upsample_initial_channel=args.upsample_initial_channel,
resblock=args.resblock,
resblock_kernel_sizes=args.resblock_kernel_sizes,
resblock_dilation_sizes=args.resblock_dilation_sizes,
)
elif model_name == 'WaveGlow':
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
else:
raise NotImplementedError(model_name)
# Start with ckpt_config, and fill missing keys from model_config
final_config = {} if ckpt_config is None else ckpt_config.copy()
missing_keys = set(model_config.keys()) - set(final_config.keys())
final_config.update({k: model_config[k] for k in missing_keys})
# If there was a ckpt_config, it should have had all args
if ckpt_config is not None and len(missing_keys) > 0:
print(f'WARNING: Keys {missing_keys} missing from the loaded config; '
'using args instead.')
assert all(v is not _missing for v in final_config.values())
return final_config
def get_model_train_setup(model_name, args):
""" Dump train setup for documentation purposes """
if model_name == 'FastPitch':
return dict()
elif model_name == 'HiFi-GAN':
return dict(
# audio
segment_size=args.segment_size,
filter_length=args.filter_length,
num_mels=args.num_mels,
hop_length=args.hop_length,
win_length=args.win_length,
sampling_rate=args.sampling_rate,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
mel_fmax_loss=args.mel_fmax_loss,
max_wav_value=args.max_wav_value,
# other
seed=args.seed,
# optimization
base_lr=args.learning_rate,
lr_decay=args.lr_decay,
epochs_all=args.epochs,
)
elif model_name == 'WaveGlow':
return dict()
else:
raise NotImplementedError(model_name)
def load_model_from_ckpt(checkpoint_data, model, key='state_dict'):
if key is None:
return checkpoint_data['model'], None
sd = checkpoint_data[key]
sd = {re.sub(r'^module\.', '', k): v for k, v in sd.items()}
status = model.load_state_dict(sd, strict=False)
return model, status
def load_and_setup_model(model_name, parser, checkpoint, amp, device,
unk_args=[], forward_is_infer=False, jitable=False):
if checkpoint is not None:
ckpt_data = torch.load(checkpoint)
print(f'{model_name}: Loading {checkpoint}...')
ckpt_config = ckpt_data.get('config')
if ckpt_config is None:
print(f'{model_name}: No model config in the checkpoint; using args.')
else:
print(f'{model_name}: Found model config saved in the checkpoint.')
else:
ckpt_config = None
ckpt_data = {}
model_parser = parse_model_args(model_name, parser, add_help=False)
model_args, model_unk_args = model_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(model_unk_args))
model_config = get_model_config(model_name, model_args, ckpt_config)
model = get_model(model_name, model_config, device,
forward_is_infer=forward_is_infer,
jitable=jitable)
if checkpoint is not None:
key = 'generator' if model_name == 'HiFi-GAN' else 'state_dict'
model, status = load_model_from_ckpt(ckpt_data, model, key)
missing = [] if status is None else status.missing_keys
unexpected = [] if status is None else status.unexpected_keys
# Attention is only used during training, we won't miss it
if model_name == 'FastPitch':
missing = [k for k in missing if not k.startswith('attention.')]
unexpected = [k for k in unexpected if not k.startswith('attention.')]
assert len(missing) == 0 and len(unexpected) == 0, (
f'Mismatched keys when loading parameters. Missing: {missing}, '
f'unexpected: {unexpected}.')
if model_name == "WaveGlow":
for k, m in model.named_modules():
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
model = model.remove_weightnorm(model)
elif model_name == 'HiFi-GAN':
assert model_args.hifigan_config is not None or ckpt_config is not None, (
'Use a HiFi-GAN checkpoint from NVIDIA DeepLearningExamples with '
'saved config or supply --hifigan-config <json_file>.')
model.remove_weight_norm()
if amp:
model.half()
model.eval()
return model.to(device), model_config, ckpt_data.get('train_setup', {})
def load_and_setup_ts_model(model_name, checkpoint, amp, device=None):
print(f'{model_name}: Loading TorchScript checkpoint {checkpoint}...')
model = torch.jit.load(checkpoint).eval()
if device is not None:
model = model.to(device)
if amp:
model.half()
elif next(model.parameters()).dtype == torch.float16:
raise ValueError('Trying to load an FP32 model, but the '
'TS checkpoint is in FP16 precision.')
return model
def convert_ts_to_trt(model_name, ts_model, parser, amp, unk_args=[]):
trt_parser = _parse_trt_compilation_args(model_name, parser, add_help=False)
trt_args, trt_unk_args = trt_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(trt_unk_args))
if model_name == 'HiFi-GAN':
return _convert_ts_to_trt_hifigan(
ts_model, amp, trt_args.trt_min_opt_max_batch,
trt_args.trt_min_opt_max_hifigan_length)
else:
raise NotImplementedError
def _parse_trt_compilation_args(model_name, parent, add_help=False):
"""
Parse model and inference specific commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
trt = parser.add_argument_group(f'{model_name} Torch-TensorRT compilation parameters')
trt.add_argument('--trt-min-opt-max-batch', nargs=3, type=int,
default=(1, 8, 16),
help='Torch-TensorRT min, optimal and max batch size')
if model_name == 'HiFi-GAN':
trt.add_argument('--trt-min-opt-max-hifigan-length', nargs=3, type=int,
default=(100, 800, 1200),
help='Torch-TensorRT min, optimal and max audio length (in frames)')
return parser
def _convert_ts_to_trt_hifigan(ts_model, amp, trt_min_opt_max_batch,
trt_min_opt_max_hifigan_length, num_mels=80):
import torch_tensorrt
trt_dtype = torch.half if amp else torch.float
print(f'Torch TensorRT: compiling HiFi-GAN for dtype {trt_dtype}.')
min_shp, opt_shp, max_shp = zip(trt_min_opt_max_batch,
(num_mels,) * 3,
trt_min_opt_max_hifigan_length)
compile_settings = {
"inputs": [torch_tensorrt.Input(
min_shape=min_shp,
opt_shape=opt_shp,
max_shape=max_shp,
dtype=trt_dtype,
)],
"enabled_precisions": {trt_dtype},
"require_full_compilation": True,
}
trt_model = torch_tensorrt.compile(ts_model, **compile_settings)
print('Torch TensorRT: compilation successful.')
return trt_model
|
TensorFlow/Classification/ConvNets/utils | utils | dali_utils | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from utils import image_processing
from utils import hvd_wrapper as hvd
from nvidia import dali
import nvidia.dali.plugin.tf as dali_tf
__all__ = ["get_synth_input_fn", "normalized_inputs"]
class HybridPipe(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.TFRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(device="cpu" if dali_cpu else "mixed", output_type=dali.types.RGB)
# Make sure that every image > 224 for CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_shorter=256)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
crop=(height, width),
image_type=dali.types.RGB,
mean=[123.68, 116.28, 103.53],
std=[58.395, 57.120, 57.385],
output_layout=dali.types.NHWC
)
self.cast_float = dali.ops.Cast(device="gpu", dtype=dali.types.FLOAT)
self.mirror = dali.ops.CoinFlip()
self.iter = 0
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"].gpu()
# Decode and augmentation
images = self.decode(images)
images = self.resize(images)
images = self.normalize(images.gpu(), mirror=self.mirror() if self.training else None)
return (images, labels)
class DALIPreprocessor(object):
def __init__(
self,
filenames,
idx_filenames,
height,
width,
batch_size,
num_threads,
dtype=tf.uint8,
dali_cpu=True,
deterministic=False,
training=False
):
device_id = hvd.local_rank()
shard_id = hvd.rank()
num_gpus = hvd.size()
pipe = HybridPipe(
tfrec_filenames=filenames,
tfrec_idx_filenames=idx_filenames,
height=height,
width=width,
batch_size=batch_size,
num_threads=num_threads,
device_id=device_id,
shard_id=shard_id,
num_gpus=num_gpus,
deterministic=deterministic,
dali_cpu=dali_cpu,
training=training
)
daliop = dali_tf.DALIIterator()
with tf.device("/gpu:0"):
self.images, self.labels = daliop(
pipeline=pipe,
shapes=[(batch_size, height, width, 3), (batch_size, 1)],
dtypes=[tf.float32, tf.int64],
device_id=device_id
)
def get_device_minibatches(self):
with tf.device("/gpu:0"):
self.labels -= 1 # Change to 0-based (don't use background class)
self.labels = tf.squeeze(self.labels, axis=-1)
return self.images, self.labels
|
PyTorch/Detection/Efficientdet/effdet/csrc/nms | nms | nms | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "cpu/vision.h"
#ifdef WITH_CUDA
#include "cuda/vision.h"
#endif
at::Tensor nms(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
if (dets.is_cuda()) {
#ifdef WITH_CUDA
// TODO raise error if not compiled with CUDA
if (dets.numel() == 0)
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
auto b = at::cat({dets, scores.unsqueeze(1)}, 1);
return nms_cuda(b, threshold);
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
at::Tensor result = nms_cpu(dets, scores, threshold);
return result;
}
|
TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils | utils | workflow | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import cudf
import cupy
import numpy as np
import nvtabular as nvt
import rmm
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from data.outbrain.features import get_features_keys
from data.outbrain.nvtabular.utils.feature_description import (
CATEGORICAL_COLUMNS, CTR_INPUTS, DISPLAY_ID_COLUMN)
from nvtabular import ColumnGroup
from nvtabular.io import Shuffle
from nvtabular.ops import (Categorify, ColumnSelector, FillMedian, FillMissing,
HashBucket, JoinExternal, JoinGroupby, LambdaOp,
ListSlice, LogOp, Normalize, Operator, Rename)
from nvtabular.ops.column_similarity import ColumnSimilarity
from nvtabular.utils import device_mem_size, get_rmm_size
TIMESTAMP_DELTA = 1465876799998
def get_devices():
try:
devices = [
int(device) for device in os.environ["CUDA_VISIBLE_DEVICES"].split(",")
]
except KeyError:
from pynvml import nvmlDeviceGetCount, nvmlInit
nvmlInit()
devices = list(range(nvmlDeviceGetCount()))
return devices
class DaysSincePublished(Operator):
def transform(self, columns, gdf):
for column in columns.names:
col = gdf[column]
col.loc[col == ""] = None
col = col.astype("datetime64[ns]")
timestamp = (gdf["timestamp"] + TIMESTAMP_DELTA).astype("datetime64[ms]")
delta = (timestamp - col).dt.days
gdf[column + "_days_since_published"] = (
delta * (delta >= 0) * (delta <= 10 * 365)
)
return gdf
def output_column_names(self, columns):
return ColumnSelector(
[column + "_days_since_published" for column in columns.names]
)
def dependencies(self):
return ["timestamp"]
def _df_to_coo(df, row="document_id", col=None, data="confidence_level"):
return cupy.sparse.coo_matrix((df[data].values, (df[row].values, df[col].values)))
def setup_rmm_pool(client, pool_size):
pool_size = get_rmm_size(pool_size)
client.run(rmm.reinitialize, pool_allocator=True, initial_pool_size=pool_size)
return None
def create_client(devices, local_directory):
client = None
if len(devices) > 1:
device_size = device_mem_size(kind="total")
device_limit = int(0.8 * device_size)
device_pool_size = int(0.8 * device_size)
cluster = LocalCUDACluster(
n_workers=len(devices),
CUDA_VISIBLE_DEVICES=",".join(str(x) for x in devices),
device_memory_limit=device_limit,
local_directory=local_directory,
)
client = Client(cluster)
setup_rmm_pool(client, device_pool_size)
return client
def create_workflow(data_bucket_folder, hash_spec, devices, local_directory, dask):
rmm.reinitialize(managed_memory=False)
documents_categories_path = os.path.join(
data_bucket_folder, "documents_categories.csv"
)
documents_topics_path = os.path.join(data_bucket_folder, "documents_topics.csv")
documents_entities_path = os.path.join(data_bucket_folder, "documents_entities.csv")
documents_categories_cudf = cudf.read_csv(documents_categories_path)
documents_topics_cudf = cudf.read_csv(documents_topics_path)
documents_entities_cudf = cudf.read_csv(documents_entities_path)
documents_entities_cudf["entity_id"] = (
documents_entities_cudf["entity_id"].astype("category").cat.codes
)
documents_categories_grouped = (
documents_categories_cudf.groupby("document_id")
.agg({"category_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_categories_grouped = documents_categories_grouped.rename(
columns={
"category_id": "category_id_list",
"confidence_level": "confidence_level_cat_list",
}
)
documents_entities_grouped = (
documents_entities_cudf.groupby("document_id")
.agg({"entity_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_entities_grouped = documents_entities_grouped.rename(
columns={
"entity_id": "entity_id_list",
"confidence_level": "confidence_level_ent_list",
}
)
documents_topics_grouped = (
documents_topics_cudf.groupby("document_id")
.agg({"topic_id": "collect", "confidence_level": "collect"})
.reset_index()
)
documents_topics_grouped = documents_topics_grouped.rename(
columns={
"topic_id": "topic_id_list",
"confidence_level": "confidence_level_top_list",
}
)
categories = _df_to_coo(documents_categories_cudf, col="category_id")
topics = _df_to_coo(documents_topics_cudf, col="topic_id")
entities = _df_to_coo(documents_entities_cudf, col="entity_id")
del documents_categories_cudf, documents_topics_cudf, documents_entities_cudf
ctr_thresh = {
"ad_id": 5,
"source_id_promo": 10,
"publisher_id_promo": 10,
"advertiser_id": 10,
"campaign_id": 10,
"document_id_promo": 5,
}
cat_cols = ColumnGroup(CATEGORICAL_COLUMNS)
def get_slice(num_char):
def lambda_slice(col, gdf):
return col.str.slice(0, num_char)
return lambda_slice
geo_location = ColumnGroup(["geo_location"])
country = geo_location >> LambdaOp(get_slice(2)) >> Rename(postfix="_country")
state = geo_location >> LambdaOp(get_slice(5)) >> Rename(postfix="_state")
geo_features = geo_location + country + state
dates = ["publish_time", "publish_time_promo"]
date_features = dates >> DaysSincePublished() >> FillMedian() >> LogOp
ctr_inputs = ColumnGroup(CTR_INPUTS)
stat_cols = ctr_inputs >> JoinGroupby(cont_cols=["clicked"], stats=["sum", "count"])
def calculate_ctr_with_filter(col, gdf):
col = col.astype(np.float32)
ctr_col_name = col.name.replace("_clicked_sum", "")
ctr_count_name = col.name.replace("_clicked_sum", "_count")
col = col / gdf[ctr_count_name] # CTR
col = col.where(gdf[ctr_count_name] >= ctr_thresh[ctr_col_name], 0) # Filter
return col
ctr_selected_features = [column + "_clicked_sum" for column in ctr_inputs.names]
dependency_features = [column + "_count" for column in ctr_inputs.names]
ctr_cols = (
stat_cols[ctr_selected_features]
>> LambdaOp(
calculate_ctr_with_filter, dependency=stat_cols[dependency_features]
)
>> Rename(f=lambda x: x.replace("_clicked_sum", "_ctr"))
)
stat_cols = stat_cols >> FillMissing() >> LogOp() >> Normalize()
ctr_cols = ctr_cols >> FillMissing()
cat_cols = cat_cols + geo_features >> HashBucket(dict(list(hash_spec.items())[:-3]))
sim_features_categories = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(categories, metric="tfidf", on_device=False)
>> Rename(postfix="_categories")
)
sim_features_topics = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(topics, metric="tfidf", on_device=False)
>> Rename(postfix="_topics")
)
sim_features_entities = (
[["document_id", "document_id_promo"]]
>> ColumnSimilarity(entities, metric="tfidf", on_device=False)
>> Rename(postfix="_entities")
)
sim_features = sim_features_categories + sim_features_topics + sim_features_entities
joined = ["document_id"] >> JoinExternal(
documents_categories_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["category_id_list", "confidence_level_cat_list", "document_id"],
cache="device",
)
joined = joined >> JoinExternal(
documents_entities_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["entity_id_list", "confidence_level_ent_list", "document_id"],
cache="device",
)
joined = joined >> JoinExternal(
documents_topics_grouped,
on=["document_id"],
on_ext=["document_id"],
how="left",
columns_ext=["topic_id_list", "confidence_level_top_list", "document_id"],
cache="device",
)
categorified_multihots = (
joined[["topic_id_list", "entity_id_list", "category_id_list"]]
>> Categorify()
>> FillMissing()
>> ListSlice(3)
>> HashBucket(dict(list(hash_spec.items())[-3:]))
)
features = (
date_features
+ ctr_cols
+ stat_cols
+ cat_cols
+ sim_features
+ categorified_multihots
+ ["clicked", "display_id"]
)
client = (
create_client(devices=devices, local_directory=local_directory)
if dask
else None
)
required_features = get_features_keys() + ["clicked"]
workflow = nvt.Workflow(features[required_features], client=client)
return workflow
def create_parquets(data_bucket_folder, train_path, valid_path):
cupy.random.seed(seed=0)
rmm.reinitialize(managed_memory=True)
documents_meta_path = os.path.join(data_bucket_folder, "documents_meta.csv")
clicks_train_path = os.path.join(data_bucket_folder, "clicks_train.csv")
events_path = os.path.join(data_bucket_folder, "events.csv")
promoted_content_path = os.path.join(data_bucket_folder, "promoted_content.csv")
documents_meta = cudf.read_csv(documents_meta_path, na_values=["\\N", ""])
documents_meta["publisher_id"].fillna(
documents_meta["publisher_id"].isnull().cumsum()
+ documents_meta["publisher_id"].max()
+ 1,
inplace=True,
)
merged = (
cudf.read_csv(clicks_train_path, na_values=["\\N", ""])
.merge(
cudf.read_csv(events_path, na_values=["\\N", ""]),
on=DISPLAY_ID_COLUMN,
how="left",
suffixes=("", "_event"),
)
.merge(
cudf.read_csv(promoted_content_path, na_values=["\\N", ""]),
on="ad_id",
how="left",
suffixes=("", "_promo"),
)
.merge(documents_meta, on="document_id", how="left")
.merge(
documents_meta,
left_on="document_id_promo",
right_on="document_id",
how="left",
suffixes=("", "_promo"),
)
)
merged["day_event"] = (merged["timestamp"] / 1000 / 60 / 60 / 24).astype(int)
merged["platform"] = merged["platform"].fillna(1)
merged["platform"] = merged["platform"] - 1
display_event = (
merged[[DISPLAY_ID_COLUMN, "day_event"]].drop_duplicates().reset_index()
)
random_state = cudf.Series(cupy.random.uniform(size=len(display_event)))
valid_ids, train_ids = display_event.scatter_by_map(
((display_event.day_event <= 10) & (random_state > 0.2)).astype(int)
)
valid_ids = valid_ids[DISPLAY_ID_COLUMN].drop_duplicates()
train_ids = train_ids[DISPLAY_ID_COLUMN].drop_duplicates()
valid_set = merged[merged[DISPLAY_ID_COLUMN].isin(valid_ids)]
train_set = merged[merged[DISPLAY_ID_COLUMN].isin(train_ids)]
valid_set = valid_set.sort_values(DISPLAY_ID_COLUMN)
train_set.to_parquet(train_path, compression=None)
valid_set.to_parquet(valid_path, compression=None)
del merged, train_set, valid_set
def save_stats(
data_bucket_folder,
output_train_folder,
train_path,
output_valid_folder,
valid_path,
stats_file,
hash_spec,
local_directory,
dask,
):
devices = get_devices()
shuffle = Shuffle.PER_PARTITION if len(devices) > 1 else True
workflow = create_workflow(
data_bucket_folder=data_bucket_folder,
hash_spec=hash_spec,
devices=devices,
local_directory=local_directory,
dask=dask,
)
train_dataset = nvt.Dataset(train_path, part_size="150MB")
valid_dataset = nvt.Dataset(valid_path, part_size="150MB")
workflow.fit(train_dataset)
workflow.transform(train_dataset).to_parquet(
output_path=output_train_folder, shuffle=shuffle, out_files_per_proc=8
)
workflow.transform(valid_dataset).to_parquet(
output_path=output_valid_folder, shuffle=None, output_files=8
)
workflow.save(stats_file)
return workflow
def clean(path):
shutil.rmtree(path)
def execute_pipeline(config):
required_folders = [
config["temporary_folder"],
config["output_train_folder"],
config["output_valid_folder"],
]
for folder in required_folders:
os.makedirs(folder, exist_ok=True)
create_parquets(
data_bucket_folder=config["data_bucket_folder"],
train_path=config["train_path"],
valid_path=config["valid_path"],
)
save_stats(
data_bucket_folder=config["data_bucket_folder"],
output_train_folder=config["output_train_folder"],
train_path=config["train_path"],
output_valid_folder=config["output_valid_folder"],
valid_path=config["valid_path"],
stats_file=config["stats_file"],
hash_spec=config["hash_spec"],
local_directory=config["temporary_folder"],
dask=config["dask"],
)
clean(config["temporary_folder"])
clean("./categories")
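# A hedged illustration (not part of the original script) of the configuration
# dictionary that execute_pipeline() consumes. Every key below is read by the
# functions above; the concrete paths and the hash_spec contents are
# placeholders chosen only for this example.
_EXAMPLE_CONFIG = {
    "data_bucket_folder": "/data/orig",  # folder with the raw Outbrain CSV files
    "temporary_folder": "/tmp/preprocessed",
    "train_path": "/tmp/preprocessed/train_gdf.parquet",
    "valid_path": "/tmp/preprocessed/valid_gdf.parquet",
    "output_train_folder": "/outbrain/data/train",
    "output_valid_folder": "/outbrain/data/valid",
    "stats_file": "/outbrain/data/stats_wnd_workflow",
    "hash_spec": {},  # column name -> number of hash buckets; the last three entries are the multi-hot list columns
    "dask": False,  # set True to preprocess on a multi-GPU dask-cuda cluster
}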
|
TensorFlow2/Segmentation/Contrib/UNet3P/data_generators | data_generators | data_generator | """
Data generator
"""
import os
import tensorflow as tf
from omegaconf import DictConfig
from utils.general_utils import join_paths, get_gpus_count
from .tf_data_generator import DataGenerator as tf_data_generator
try:
from .dali_data_generator import data_generator as dali_data_generator
except ModuleNotFoundError:
print("NVIDIA DALI not installed, please install it."
"\nNote: DALI is only available on Linux platform. For Window "
"you can use TensorFlow generator for training.")
def get_data_generator(cfg: DictConfig,
mode: str,
strategy: tf.distribute.Strategy = None):
"""
    Creates and returns a data generator object based on the given type.
"""
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR":
print(f"Using TensorFlow generator for {mode} data")
generator = tf_data_generator(cfg, mode)
elif cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR":
print(f"Using NVIDIA DALI generator for {mode} data")
if cfg.USE_MULTI_GPUS.VALUE:
generator = dali_data_generator(cfg, mode, strategy)
else:
generator = dali_data_generator(cfg, mode)
else:
raise ValueError(
"Wrong generator type passed."
"\nPossible options are TF_GENERATOR and DALI_GENERATOR"
)
return generator
def update_batch_size(cfg: DictConfig):
"""
    Scale up the batch size for multiple GPUs when using the TensorFlow generator.
"""
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR" and cfg.USE_MULTI_GPUS.VALUE:
# change batch size according to available gpus
cfg.HYPER_PARAMETERS.BATCH_SIZE = \
cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count()
def get_batch_size(cfg: DictConfig):
"""
    Return the batch size.
    For the DALI generator, scale the batch size up to the number of available GPUs.
"""
if cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR" and cfg.USE_MULTI_GPUS.VALUE:
# change batch size according to available gpus
return cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count()
else:
return cfg.HYPER_PARAMETERS.BATCH_SIZE
def get_iterations(cfg: DictConfig, mode: str):
"""
    Return the number of steps per epoch.
"""
images_length = len(
os.listdir(
join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].IMAGES_PATH
)
)
)
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR":
training_steps = images_length // cfg.HYPER_PARAMETERS.BATCH_SIZE
elif cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR":
if cfg.USE_MULTI_GPUS.VALUE:
training_steps = images_length // (
cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count())
else:
training_steps = images_length // cfg.HYPER_PARAMETERS.BATCH_SIZE
else:
raise ValueError("Wrong generator type passed.")
return training_steps
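# A hedged usage sketch (not part of the original module): it wires the helpers
# above together for a single split. The split name "TRAIN" and the strategy
# argument are assumptions; the actual values come from the project's Hydra
# config and training script.
def _example_build_training_inputs(cfg: DictConfig,
                                   strategy: tf.distribute.Strategy = None):
    """
    Illustrative only: return a data generator, the effective batch size and
    the number of steps per epoch for the "TRAIN" split.
    """
    update_batch_size(cfg)  # only changes cfg for the TF generator + multi-GPU case
    generator = get_data_generator(cfg, "TRAIN", strategy)
    batch_size = get_batch_size(cfg)
    steps_per_epoch = get_iterations(cfg, "TRAIN")
    return generator, batch_size, steps_per_epoch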
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text/unidecoder | unidecoder | homoglyphs | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
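# A small illustrative helper (not part of the original table): it inverts the
# mapping above into a single homoglyph -> ASCII lookup and uses it to
# normalize a string. Both names below are hypothetical additions.
_ASCII_FOR_HOMOGLYPH = {glyph: ascii_char
                        for ascii_char, glyphs in homoglyphs.items()
                        for glyph in glyphs}


def _replace_homoglyphs(text):
    """Replace every known homoglyph with its ASCII look-alike; keep other characters."""
    return ''.join(_ASCII_FOR_HOMOGLYPH.get(char, char) for char in text)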
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading | dataloading | dataloader | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected]), Tomasz Cheda ([email protected])
import os
from .defaults import TRAIN_MAPPING, TEST_MAPPING
from .feature_spec import FeatureSpec
from .raw_binary_dataset import TfRawBinaryDataset, DatasetMetadata
from .synthetic_dataset import SyntheticDataset
from .split_tfrecords_multihot_dataset import SplitTFRecordsDataset
def get_dataset_metadata(dataset_path, feature_spec):
fspec_path = os.path.join(dataset_path, feature_spec)
feature_spec = FeatureSpec.from_yaml(fspec_path)
dataset_metadata = DatasetMetadata(num_numerical_features=feature_spec.get_number_of_numerical_features(),
categorical_cardinalities=feature_spec.get_categorical_sizes())
return dataset_metadata
def _create_pipelines_synthetic_fspec(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
dataset_metadata = DatasetMetadata(num_numerical_features=feature_spec.get_number_of_numerical_features(),
categorical_cardinalities=feature_spec.get_categorical_sizes())
local_table_sizes = [dataset_metadata.categorical_cardinalities[i] for i in kwargs['table_ids']]
names = feature_spec.get_categorical_feature_names()
local_names = [names[i] for i in kwargs['table_ids']]
local_table_hotness = [feature_spec.feature_spec[name]["hotness"] for name in local_names]
local_table_alpha = [feature_spec.feature_spec[name]["alpha"] for name in local_names]
    print('Local table sizes: ', local_table_sizes)
print('Local table hotness: ', local_table_hotness)
train_dataset = SyntheticDataset(batch_size=kwargs['train_batch_size'],
num_numerical_features=dataset_metadata.num_numerical_features,
categorical_feature_cardinalities=local_table_sizes,
categorical_feature_hotness=local_table_hotness,
categorical_feature_alpha=local_table_alpha,
num_batches=kwargs.get('synthetic_dataset_train_batches', int(1e9)),
num_workers=kwargs['world_size'],
variable_hotness=False)
test_dataset = SyntheticDataset(batch_size=kwargs['test_batch_size'],
num_numerical_features=dataset_metadata.num_numerical_features,
categorical_feature_cardinalities=local_table_sizes,
categorical_feature_hotness=local_table_hotness,
categorical_feature_alpha=local_table_alpha,
num_batches=kwargs.get('synthetic_dataset_valid_batches', int(1e9)),
num_workers=kwargs['world_size'],
variable_hotness=False)
return train_dataset, test_dataset
def _create_pipelines_tf_raw(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
local_categorical_names = feature_spec.cat_positions_to_names(kwargs['table_ids'])
train_dataset = TfRawBinaryDataset(feature_spec=feature_spec,
instance=TRAIN_MAPPING,
batch_size=kwargs['train_batch_size'],
numerical_features_enabled=True,
local_categorical_feature_names=local_categorical_names,
rank=kwargs['rank'],
world_size=kwargs['world_size'],
concat_features=kwargs['concat_features'],
data_parallel_categoricals=kwargs['data_parallel_input'])
test_dataset = TfRawBinaryDataset(feature_spec=feature_spec,
instance=TEST_MAPPING,
batch_size=kwargs['test_batch_size'],
numerical_features_enabled=True,
local_categorical_feature_names=local_categorical_names,
rank=kwargs['rank'],
world_size=kwargs['world_size'],
concat_features=kwargs['concat_features'],
data_parallel_categoricals=kwargs['data_parallel_input'])
return train_dataset, test_dataset
def _create_pipelines_split_tfrecords(**kwargs):
fspec_path = os.path.join(kwargs['dataset_path'], kwargs['feature_spec'])
feature_spec = FeatureSpec.from_yaml(fspec_path)
train_dataset = SplitTFRecordsDataset(dataset_dir=feature_spec.base_directory + '/train/',
feature_ids=kwargs['table_ids'],
num_numerical=feature_spec.get_number_of_numerical_features(),
rank=kwargs['rank'], world_size=kwargs['world_size'],
batch_size=kwargs['train_batch_size'])
test_dataset = SplitTFRecordsDataset(dataset_dir=feature_spec.base_directory + '/test/',
feature_ids=kwargs['table_ids'],
num_numerical=feature_spec.get_number_of_numerical_features(),
rank=kwargs['rank'], world_size=kwargs['world_size'],
batch_size=kwargs['test_batch_size'])
return train_dataset, test_dataset
def create_input_pipelines(dataset_type, dataset_path, train_batch_size, test_batch_size,
table_ids, feature_spec, rank=0, world_size=1, concat_features=False,
data_parallel_input=False):
# pass along all arguments except dataset type
kwargs = locals()
del kwargs['dataset_type']
#hardcoded for now
kwargs['synthetic_dataset_use_feature_spec'] = True
if dataset_type == 'synthetic' and not kwargs['synthetic_dataset_use_feature_spec']:
return _create_pipelines_synthetic(**kwargs)
elif dataset_type == 'synthetic' and kwargs['synthetic_dataset_use_feature_spec']: # synthetic based on feature spec
return _create_pipelines_synthetic_fspec(**kwargs)
elif dataset_type == 'tf_raw':
return _create_pipelines_tf_raw(**kwargs)
elif dataset_type == 'split_tfrecords':
return _create_pipelines_split_tfrecords(**kwargs)
else:
raise ValueError(f'Unsupported dataset type: {dataset_type}')
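# A hedged usage sketch (not part of the original module) for a single-worker
# setup: the dataset path is a placeholder, and placing every embedding table
# on one worker plus the 65536 batch size are assumptions made only for this
# example.
def _example_single_worker_pipelines(dataset_path='/data/preprocessed',
                                     feature_spec='feature_spec.yaml'):
    metadata = get_dataset_metadata(dataset_path, feature_spec)
    table_ids = list(range(len(metadata.categorical_cardinalities)))
    return create_input_pipelines(dataset_type='tf_raw',
                                  dataset_path=dataset_path,
                                  train_batch_size=65536,
                                  test_batch_size=65536,
                                  table_ids=table_ids,
                                  feature_spec=feature_spec,
                                  rank=0,
                                  world_size=1)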
|
PyTorch/LanguageModeling/BERT/scripts | scripts | data_download | #!/usr/bin/env bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATA_DIR=${1:-/workspace/bert/data}
# Download vocab files from pretrained model
cd vocab && python3 download_models.py && rm *.zip && rm ./*/*.ckpt.*
# Download SQUAD
cd $DATA_DIR/squad && . squad_download.sh
# Download SWAG
git clone https://github.com/rowanz/swagaf.git $DATA_DIR/swag
# Download GLUE
cd $DATA_DIR/glue && . download_mrpc.sh
# WIKI Download
cd $DATA_DIR/wikipedia_corpus && . download_wikipedia.sh
# Bookcorpus Download
cd $DATA_DIR/bookcorpus && . download_bookcorpus.sh
cd $DATA_DIR
# Create HDF5 files for WIKI
bash create_datasets_from_start.sh wikipedia_corpus ./wikipedia_corpus/wikipedia_corpus.txt \
&& rm -r ./wikipedia_corpus/final_* \
# Create HDF5 files for Bookcorpus
bash create_datasets_from_start.sh bookcorpus ./bookcorpus/bookcorpus.txt \
&& rm -r ./bookcorpus/final_* \
# Create HDF5 files for inter sequence-pair mixed Wikipedia and Bookcorpus
bash merge_datasets_after_creation.sh merged_wiki+books wikipedia_corpus/hdf5_shards,bookcorpus/hdf5_shards 1024
|
TensorFlow/Classification/ConvNets | ConvNets | main | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils.cmdline_helper import parse_cmdline
from model.resnet import model_architectures
from runtime import Runner
import dllogger
from utils import hvd_wrapper as hvd
import tensorflow as tf
import os
import warnings
warnings.simplefilter("ignore")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.ERROR)
FLAGS = parse_cmdline(model_architectures.keys())
hvd.init(True)
if hvd.rank() == 0:
log_path = os.path.join(FLAGS.results_dir, FLAGS.log_filename)
os.makedirs(FLAGS.results_dir, exist_ok=True)
dllogger.init(backends=[
dllogger.JSONStreamBackend(
verbosity=dllogger.Verbosity.VERBOSE, filename=log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
])
else:
dllogger.init(backends=[])
dllogger.log(data=vars(FLAGS), step='PARAMETER')
dllogger.metadata("train_throughput", {"unit": "images/s"})
dllogger.metadata("eval_throughput", {"unit": "images/s"})
dllogger.metadata("eval_latency_avg", {"unit": "ms"})
dllogger.metadata("eval_latency_p90", {"unit": "ms"})
dllogger.metadata("eval_latency_p95", {"unit": "ms"})
dllogger.metadata("eval_latency_p99", {"unit": "ms"})
dllogger.metadata("top1_accuracy", {"unit": None})
dllogger.metadata("top5_accuracy", {"unit": None})
runner = Runner(
# ========= Model HParams ========= #
n_classes=1001,
architecture=FLAGS.arch,
input_format='NHWC',
compute_format=FLAGS.data_format,
dtype=tf.float32,
n_channels=3,
height=224 if FLAGS.data_dir else FLAGS.synthetic_data_size,
width=224 if FLAGS.data_dir else FLAGS.synthetic_data_size,
distort_colors=False,
log_dir=FLAGS.results_dir,
model_dir=FLAGS.model_dir if FLAGS.model_dir is not None else FLAGS.results_dir,
data_dir=FLAGS.data_dir,
data_idx_dir=FLAGS.data_idx_dir,
weight_init=FLAGS.weight_init,
use_xla=FLAGS.xla,
use_tf_amp=FLAGS.amp,
use_dali=FLAGS.dali,
use_cpu=FLAGS.cpu,
gpu_memory_fraction=FLAGS.gpu_memory_fraction,
gpu_id=FLAGS.gpu_id,
seed=FLAGS.seed)
if FLAGS.mode in ["train", "train_and_evaluate", "training_benchmark"]:
runner.train(iter_unit=FLAGS.iter_unit,
num_iter=FLAGS.num_iter,
run_iter=FLAGS.run_iter,
batch_size=FLAGS.batch_size,
warmup_steps=FLAGS.warmup_steps,
log_every_n_steps=FLAGS.display_every,
weight_decay=FLAGS.weight_decay,
lr_init=FLAGS.lr_init,
lr_warmup_epochs=FLAGS.lr_warmup_epochs,
momentum=FLAGS.momentum,
loss_scale=FLAGS.static_loss_scale,
label_smoothing=FLAGS.label_smoothing,
mixup=FLAGS.mixup,
use_static_loss_scaling=(FLAGS.static_loss_scale != -1),
use_cosine_lr=FLAGS.cosine_lr,
is_benchmark=FLAGS.mode == 'training_benchmark',
use_final_conv=FLAGS.use_final_conv,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
quant_delay=FLAGS.quant_delay,
use_qdq=FLAGS.use_qdq,
finetune_checkpoint=FLAGS.finetune_checkpoint)
if FLAGS.mode in ["train_and_evaluate", 'evaluate', 'inference_benchmark']:
if FLAGS.mode == 'inference_benchmark' and hvd.size() > 1:
raise NotImplementedError(
"Only single GPU inference is implemented.")
elif hvd.rank() == 0:
runner.evaluate(iter_unit=FLAGS.iter_unit if FLAGS.mode != "train_and_evaluate" else "epoch",
num_iter=FLAGS.num_iter if FLAGS.mode != "train_and_evaluate" else 1,
warmup_steps=FLAGS.warmup_steps,
batch_size=FLAGS.batch_size,
log_every_n_steps=FLAGS.display_every,
is_benchmark=FLAGS.mode == 'inference_benchmark',
export_dir=FLAGS.export_dir,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
use_final_conv=FLAGS.use_final_conv,
use_qdq=FLAGS.use_qdq)
if hvd.size() > 1:
# Wait for all processes to finish
from mpi4py import MPI
MPI.COMM_WORLD.Barrier()
if FLAGS.mode == 'predict':
if FLAGS.to_predict is None:
raise ValueError("No data to predict on.")
if not os.path.isfile(FLAGS.to_predict):
raise ValueError("Only prediction on single images is supported!")
if hvd.size() > 1:
raise NotImplementedError(
"Only single GPU inference is implemented.")
else:
runner.predict(FLAGS.to_predict,
quantize=FLAGS.quantize,
symmetric=FLAGS.symmetric,
use_qdq=FLAGS.use_qdq,
use_final_conv=FLAGS.use_final_conv)
|
PyTorch/Recommendation/DLRM/tests/feature_specs | feature_specs | criteo_f15 | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
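  # the "&id001" anchor above is reused as "*id001" in the train/test source_spec
  # entries below, so both splits share this numerical feature list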
feature_spec:
cat_0.bin:
cardinality: 7912889
dtype: int32
cat_1.bin:
cardinality: 33823
dtype: int32
cat_10.bin:
cardinality: 582469
dtype: int32
cat_11.bin:
cardinality: 245828
dtype: int32
cat_12.bin:
cardinality: 11
dtype: int8
cat_13.bin:
cardinality: 2209
dtype: int16
cat_14.bin:
cardinality: 10667
dtype: int16
cat_15.bin:
cardinality: 104
dtype: int8
cat_16.bin:
cardinality: 4
dtype: int8
cat_17.bin:
cardinality: 968
dtype: int16
cat_18.bin:
cardinality: 15
dtype: int8
cat_19.bin:
cardinality: 8165896
dtype: int32
cat_2.bin:
cardinality: 17139
dtype: int16
cat_20.bin:
cardinality: 2675940
dtype: int32
cat_21.bin:
cardinality: 7156453
dtype: int32
cat_22.bin:
cardinality: 302516
dtype: int32
cat_23.bin:
cardinality: 12022
dtype: int16
cat_24.bin:
cardinality: 97
dtype: int8
cat_25.bin:
cardinality: 35
dtype: int8
cat_3.bin:
cardinality: 7339
dtype: int16
cat_4.bin:
cardinality: 20046
dtype: int16
cat_5.bin:
cardinality: 4
dtype: int8
cat_6.bin:
cardinality: 7105
dtype: int16
cat_7.bin:
cardinality: 1382
dtype: int16
cat_8.bin:
cardinality: 63
dtype: int8
cat_9.bin:
cardinality: 5554114
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/AMP | AMP | DGX2V_resnet50_AMP_250E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX2V /imagenet --workspace ${1:-./} --raport-file raport.json
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | data_functions | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from tacotron2.data_function import TextMelCollate
from tacotron2.data_function import TextMelLoader
from waveglow.data_function import MelAudioLoader
from tacotron2.data_function import batch_to_gpu as batch_to_gpu_tacotron2
from waveglow.data_function import batch_to_gpu as batch_to_gpu_waveglow
def get_collate_function(model_name, n_frames_per_step=1):
if model_name == 'Tacotron2':
collate_fn = TextMelCollate(n_frames_per_step)
elif model_name == 'WaveGlow':
collate_fn = torch.utils.data.dataloader.default_collate
else:
raise NotImplementedError(
"unknown collate function requested: {}".format(model_name))
return collate_fn
def get_data_loader(model_name, dataset_path, audiopaths_and_text, args):
if model_name == 'Tacotron2':
data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)
elif model_name == 'WaveGlow':
data_loader = MelAudioLoader(dataset_path, audiopaths_and_text, args)
else:
raise NotImplementedError(
"unknown data loader requested: {}".format(model_name))
return data_loader
def get_batch_to_gpu(model_name):
if model_name == 'Tacotron2':
batch_to_gpu = batch_to_gpu_tacotron2
elif model_name == 'WaveGlow':
batch_to_gpu = batch_to_gpu_waveglow
else:
raise NotImplementedError(
"unknown batch_to_gpu requested: {}".format(model_name))
return batch_to_gpu
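# A hedged usage sketch (not part of the original module): it shows how these
# factories are typically combined into a PyTorch DataLoader. The batch size
# and the contents of `args` are placeholders; in the real training script
# `args` comes from the model-specific argument parser.
def _example_tacotron2_train_loader(dataset_path, audiopaths_and_text, args,
                                    batch_size=32):
    train_set = get_data_loader('Tacotron2', dataset_path, audiopaths_and_text, args)
    collate_fn = get_collate_function('Tacotron2', n_frames_per_step=1)
    batch_to_gpu = get_batch_to_gpu('Tacotron2')
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        collate_fn=collate_fn, drop_last=True)
    return train_loader, batch_to_gpu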
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics | metrics | coco_evaluation_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.object_detection.metrics.coco_evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
def _get_categories_list():
return [{
'id': 1,
'name': 'person'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'cat'
}]
class CocoDetectionEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
"""Tests that mAP is calculated correctly on GT and Detections."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsSkipCrowd(self):
"""Tests computing mAP with is_crowd GT boxes skipped."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.], [99., 99., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1, 2]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([0, 1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsEmptyCrowd(self):
"""Tests computing mAP with empty is_crowd array passed in."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes:
np.array([1]),
standard_fields.InputDataFields.groundtruth_is_crowd:
np.array([])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
def testRejectionOnDuplicateGroundtruth(self):
"""Tests that groundtruth cannot be added more than once for an image."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
# Add groundtruth
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
groundtruth_lists_len = len(coco_evaluator._groundtruth_list)
# Add groundtruth with the same image id.
coco_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1
})
self.assertEqual(groundtruth_lists_len,
len(coco_evaluator._groundtruth_list))
def testRejectionOnDuplicateDetections(self):
"""Tests that detections cannot be added more than once for an image."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
# Add groundtruth
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[99., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1])
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
detections_lists_len = len(coco_evaluator._detection_boxes_list)
coco_evaluator.add_single_detected_image_info(
image_id='image1', # Note that this image id was previously added.
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
self.assertEqual(detections_lists_len,
len(coco_evaluator._detection_boxes_list))
def testExceptionRaisedWithMissingGroundtruth(self):
"""Tests that exception is raised for detection with missing groundtruth."""
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
with self.assertRaises(ValueError):
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1])
})
class CocoEvaluationPyFuncTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: 'image1',
groundtruth_boxes: np.array([[100., 100., 200., 200.]]),
groundtruth_classes: np.array([1]),
detection_boxes: np.array([[100., 100., 200., 200.]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
metrics = {}
      for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsIsAnnotated(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
is_annotated = tf.placeholder(tf.bool, shape=())
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
'is_annotated': is_annotated,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: 'image1',
groundtruth_boxes: np.array([[100., 100., 200., 200.]]),
groundtruth_classes: np.array([1]),
is_annotated: True,
detection_boxes: np.array([[100., 100., 200., 200.]]),
detection_scores: np.array([.8]),
detection_classes: np.array([1])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([3]),
is_annotated: True,
detection_boxes: np.array([[50., 50., 100., 100.]]),
detection_scores: np.array([.7]),
detection_classes: np.array([3])
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([2]),
is_annotated: True,
detection_boxes: np.array([[25., 25., 50., 50.]]),
detection_scores: np.array([.9]),
detection_classes: np.array([2])
})
sess.run(update_op,
feed_dict={
image_id: 'image4',
groundtruth_boxes: np.zeros((0, 4)),
groundtruth_classes: np.zeros((0)),
is_annotated: False, # Note that this image isn't annotated.
detection_boxes: np.array([[25., 25., 50., 50.],
[25., 25., 70., 50.],
[25., 25., 80., 50.],
[25., 25., 90., 50.]]),
detection_scores: np.array([0.6, 0.7, 0.8, 0.9]),
detection_classes: np.array([1, 2, 2, 3])
})
metrics = {}
      for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsPadded(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.], [-1, -1, -1, -1]]),
groundtruth_classes:
np.array([1, -1]),
detection_boxes:
np.array([[100., 100., 200., 200.], [0., 0., 0., 0.]]),
detection_scores:
np.array([.8, 0.]),
detection_classes:
np.array([1, -1])
})
sess.run(
update_op,
feed_dict={
image_id:
'image2',
groundtruth_boxes:
np.array([[50., 50., 100., 100.], [-1, -1, -1, -1]]),
groundtruth_classes:
np.array([3, -1]),
detection_boxes:
np.array([[50., 50., 100., 100.], [0., 0., 0., 0.]]),
detection_scores:
np.array([.7, 0.]),
detection_classes:
np.array([3, -1])
})
sess.run(
update_op,
feed_dict={
image_id:
'image3',
groundtruth_boxes:
np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]),
groundtruth_classes:
np.array([2, 2]),
detection_boxes:
np.array([[25., 25., 50., 50.], [10., 10., 15., 15.]]),
detection_scores:
np.array([.95, .9]),
detection_classes:
np.array([2, 2])
})
metrics = {}
      for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes: np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
groundtruth_classes: np.array([[1], [3], [2]]),
detection_boxes: np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
detection_scores: np.array([[.8], [.7], [.9]]),
detection_classes: np.array([[1], [3], [2]])
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsPaddedBatches(self):
coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
num_gt_boxes_per_image = tf.placeholder(tf.int32, shape=(None))
detection_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
num_det_boxes_per_image = tf.placeholder(tf.int32, shape=(None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
detection_fields.detection_boxes: detection_boxes,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
'num_groundtruth_boxes_per_image': num_gt_boxes_per_image,
'num_det_boxes_per_image': num_det_boxes_per_image
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionBoxes_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes:
np.array([[[100., 100., 200., 200.], [-1, -1, -1, -1]],
[[50., 50., 100., 100.], [-1, -1, -1, -1]],
[[25., 25., 50., 50.], [10., 10., 15., 15.]]]),
groundtruth_classes:
np.array([[1, -1], [3, -1], [2, 2]]),
num_gt_boxes_per_image:
np.array([1, 1, 2]),
detection_boxes:
np.array([[[100., 100., 200., 200.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[50., 50., 100., 100.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[25., 25., 50., 50.],
[10., 10., 15., 15.],
[10., 10., 15., 15.]]]),
detection_scores:
np.array([[.8, 0., 0.], [.7, 0., 0.], [.95, .9, 0.9]]),
detection_classes:
np.array([[1, -1, -1], [3, -1, -1], [2, 2, 2]]),
num_det_boxes_per_image:
np.array([1, 1, 3]),
})
# Check the number of bounding boxes added.
self.assertEqual(len(coco_evaluator._groundtruth_list), 4)
self.assertEqual(len(coco_evaluator._detection_boxes_list), 5)
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@1'], 0.83333331)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionBoxes_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_boxes_list)
self.assertFalse(coco_evaluator._image_ids)
class CocoMaskEvaluationTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
coco_evaluator.add_single_ground_truth_image_info(
image_id='image1',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image1',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[100., 100., 200., 200.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image2',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image2',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[50., 50., 100., 100.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_ground_truth_image_info(
image_id='image3',
groundtruth_dict={
standard_fields.InputDataFields.groundtruth_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.InputDataFields.groundtruth_classes: np.array([1]),
standard_fields.InputDataFields.groundtruth_instance_masks:
np.pad(np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
coco_evaluator.add_single_detected_image_info(
image_id='image3',
detections_dict={
standard_fields.DetectionResultFields.detection_boxes:
np.array([[25., 25., 50., 50.]]),
standard_fields.DetectionResultFields.detection_scores:
np.array([.8]),
standard_fields.DetectionResultFields.detection_classes:
np.array([1]),
standard_fields.DetectionResultFields.detection_masks:
np.pad(np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)), mode='constant')
})
metrics = coco_evaluator.evaluate()
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
coco_evaluator.clear()
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._detection_masks_list)
class CocoMaskEvaluationPyFuncTest(tf.test.TestCase):
def testGetOneMAPWithMatchingGroundtruthAndDetections(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
image_id = tf.placeholder(tf.string, shape=())
groundtruth_boxes = tf.placeholder(tf.float32, shape=(None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(None))
groundtruth_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
detection_scores = tf.placeholder(tf.float32, shape=(None))
detection_classes = tf.placeholder(tf.float32, shape=(None))
detection_masks = tf.placeholder(tf.uint8, shape=(None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id:
'image1',
groundtruth_boxes:
np.array([[100., 100., 200., 200.], [50., 50., 100., 100.]]),
groundtruth_classes:
np.array([1, 2]),
groundtruth_masks:
np.stack([
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant')
]),
detection_scores:
np.array([.9, .8]),
detection_classes:
np.array([2, 1]),
detection_masks:
np.stack([
np.pad(
np.ones([50, 50], dtype=np.uint8), ((0, 70), (0, 70)),
mode='constant'),
np.pad(
np.ones([100, 100], dtype=np.uint8), ((10, 10),
(10, 10)),
mode='constant'),
])
})
sess.run(update_op,
feed_dict={
image_id: 'image2',
groundtruth_boxes: np.array([[50., 50., 100., 100.]]),
groundtruth_classes: np.array([1]),
groundtruth_masks: np.pad(np.ones([1, 50, 50],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant'),
detection_scores: np.array([.8]),
detection_classes: np.array([1]),
detection_masks: np.pad(np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant')
})
sess.run(update_op,
feed_dict={
image_id: 'image3',
groundtruth_boxes: np.array([[25., 25., 50., 50.]]),
groundtruth_classes: np.array([1]),
groundtruth_masks: np.pad(np.ones([1, 25, 25],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant'),
detection_scores: np.array([.8]),
detection_classes: np.array([1]),
detection_masks: np.pad(np.ones([1, 25, 25],
dtype=np.uint8),
((0, 0), (10, 10), (10, 10)),
mode='constant')
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._detection_masks_list)
def testGetOneMAPWithMatchingGroundtruthAndDetectionsBatched(self):
coco_evaluator = coco_evaluation.CocoMaskEvaluator(_get_categories_list())
batch_size = 3
image_id = tf.placeholder(tf.string, shape=(batch_size))
groundtruth_boxes = tf.placeholder(tf.float32, shape=(batch_size, None, 4))
groundtruth_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
groundtruth_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
detection_scores = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_classes = tf.placeholder(tf.float32, shape=(batch_size, None))
detection_masks = tf.placeholder(
tf.uint8, shape=(batch_size, None, None, None))
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
eval_dict = {
input_data_fields.key: image_id,
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes,
input_data_fields.groundtruth_instance_masks: groundtruth_masks,
detection_fields.detection_scores: detection_scores,
detection_fields.detection_classes: detection_classes,
detection_fields.detection_masks: detection_masks,
}
eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)
_, update_op = eval_metric_ops['DetectionMasks_Precision/mAP']
with self.test_session() as sess:
sess.run(
update_op,
feed_dict={
image_id: ['image1', 'image2', 'image3'],
groundtruth_boxes:
np.array([[[100., 100., 200., 200.]],
[[50., 50., 100., 100.]],
[[25., 25., 50., 50.]]]),
groundtruth_classes:
np.array([[1], [1], [1]]),
groundtruth_masks:
np.stack([
np.pad(
np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (0, 0), (0, 0)),
mode='constant'),
np.pad(
np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (25, 25), (25, 25)),
mode='constant'),
np.pad(
np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (37, 38), (37, 38)),
mode='constant')
],
axis=0),
detection_scores:
np.array([[.8], [.8], [.8]]),
detection_classes:
np.array([[1], [1], [1]]),
detection_masks:
np.stack([
np.pad(
np.ones([1, 100, 100], dtype=np.uint8),
((0, 0), (0, 0), (0, 0)),
mode='constant'),
np.pad(
np.ones([1, 50, 50], dtype=np.uint8),
((0, 0), (25, 25), (25, 25)),
mode='constant'),
np.pad(
np.ones([1, 25, 25], dtype=np.uint8),
((0, 0), (37, 38), (37, 38)),
mode='constant')
],
axis=0)
})
metrics = {}
for key, (value_op, _) in eval_metric_ops.items():
metrics[key] = value_op
metrics = sess.run(metrics)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/[email protected]'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Precision/mAP (small)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@1'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@10'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (large)'], 1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (medium)'],
1.0)
self.assertAlmostEqual(metrics['DetectionMasks_Recall/AR@100 (small)'], 1.0)
self.assertFalse(coco_evaluator._groundtruth_list)
self.assertFalse(coco_evaluator._image_ids_with_detections)
self.assertFalse(coco_evaluator._image_id_to_mask_shape_map)
self.assertFalse(coco_evaluator._detection_masks_list)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | runner | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pathlib
import sys
from distutils.version import LooseVersion
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...report import save_results, show_results, sort_results
from ...utils import log_dict, parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer")
class PerfAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"timeout": timeout,
"verbose": verbose,
},
)
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
self._server_url = server_url
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._batch_sizes = batch_sizes
self._concurrency = concurrency
self._measurement_mode = measurement_mode
self._measurement_interval = measurement_interval
self._measurement_request_count = measurement_request_count
self._evaluation_mode = evaluation_mode
self._offline_mode = offline_mode
self._result_path = result_path
self._output_shared_memory_size = output_shared_memory_size
self._timeout = timeout
self._verbose = verbose
self._protocol, self._host, self._port = parse_server_url(server_url)
def run(self):
results: List[Dict] = []
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
performance_partial_file = (
f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
)
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if self._verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
if self._verbose:
log_dict(
f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params
)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
self._update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=self._result_path.as_posix(), data=results)
show_results(results=results)
def _calculate_average_latency(self, r):
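# The average end-to-end latency is approximated as the sum of the per-component
# latencies reported by Perf Analyzer in each CSV row.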
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields)
return avg_latency
def _update_performance_data(self, results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = self._calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
|
PyTorch/Classification/GPUNet/triton/065ms | 065ms | README | # Deploying the GPUNet model on Triton Inference Server
This folder contains instructions for deploying the model to run inference
on Triton Inference Server, as well as a detailed performance analysis.
The purpose of this document is to help you achieve the best inference performance.
## Table of contents
- [Solution overview](#solution-overview)
- [Introduction](#introduction)
- [Deployment process](#deployment-process)
- [Setup](#setup)
- [Quick Start Guide](#quick-start-guide)
- [Performance](#performance)
- [Offline scenario](#offline-scenario)
- [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Online scenario](#online-scenario)
- [Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#online-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16)
- [Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16)
- [Advanced](#advanced)
- [Step by step deployment process](#step-by-step-deployment-process)
- [Latency explanation](#latency-explanation)
- [Release notes](#release-notes)
- [Changelog](#changelog)
- [Known issues](#known-issues)
## Solution overview
### Introduction
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server)
provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs.
The server provides an inference service via an HTTP or gRPC endpoint,
allowing remote clients to request inferencing for any number of GPU
or CPU models being managed by the server.
This README provides step-by-step deployment instructions for models generated
during training (as described in the [model README](../readme.md)).
Additionally, this README provides the corresponding deployment scripts that
ensure optimal GPU utilization during inferencing on Triton Inference Server.
### Deployment process
The deployment process consists of two steps:
1. Conversion.
The purpose of conversion is to find the best performing model
format supported by Triton Inference Server.
Triton Inference Server uses a number of runtime backends such as
[TensorRT](https://developer.nvidia.com/tensorrt),
[LibTorch](https://github.com/triton-inference-server/pytorch_backend) and
[ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend)
to support various model types. Refer to the
[Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton)
for a list of available backends.
2. Configuration.
Model configuration on Triton Inference Server, which generates
necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).
After deployment, Triton Inference Server is used to evaluate the converted model in two steps:
1. Correctness tests.
Produce results which are tested against given correctness thresholds.
2. Performance tests.
Produce latency and throughput results for offline (static batching)
and online (dynamic batching) scenarios.
All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide).
## Setup
Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [NVIDIA PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [NVIDIA Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
## Quick Start Guide
Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd PyTorch/Classification/GPUNet
```
2. Prepare dataset.
See the [Quick Start Guide](../../README.md#prepare-the-dataset)
3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.
```
./triton/scripts/docker/build.sh
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
4. Execute the runner script (note that the run scripts are prepared per NVIDIA GPU).
```
NVIDIA DGX-1 (1x V100 32GB): ./triton/065ms/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh
NVIDIA DGX A100 (1x A100 80GB): ./triton/065ms/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect
the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Offline scenario
The offline scenario assumes the client and server are located on the same host. The tests use the following setup (a minimal Perf Analyzer invocation for this scenario is sketched below):
- tensors are passed through shared memory between the client and server; the Perf Analyzer flag `shared-memory=system` is used
- a single request with a static batch size is sent from the client to the server
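The sketch below illustrates roughly how such an offline measurement could be launched directly with Perf Analyzer. The model name, endpoint, and batch size are placeholders rather than values taken from the runner scripts, which drive Perf Analyzer through `triton/run_performance_on_triton.py`.
```shell
# Hedged sketch: MODEL_NAME, the endpoint, and the batch size are placeholders.
perf_analyzer \
    -m ${MODEL_NAME} \
    -u localhost:8001 -i grpc \
    -b 8 \
    --concurrency-range 1:1:1 \
    --shared-memory system
```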
#### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 842.00 | 0.05 | 0.25 | 0.09 | 0.15 | 0.63 | 0.01 | 0.00 | 1.15 | 1.35 | 1.40 | 1.46 | 1.18 |
| 2 | 1 | 1340.00 | 0.06 | 0.26 | 0.09 | 0.25 | 0.81 | 0.01 | 0.00 | 1.47 | 1.65 | 1.70 | 1.77 | 1.49 |
| 4 | 1 | 2076.00 | 0.05 | 0.27 | 0.08 | 0.37 | 1.14 | 0.01 | 0.00 | 1.89 | 2.09 | 2.15 | 2.19 | 1.92 |
| 8 | 1 | 2800.00 | 0.05 | 0.26 | 0.09 | 0.61 | 1.83 | 0.01 | 0.00 | 2.84 | 3.00 | 3.04 | 3.08 | 2.85 |
| 16 | 1 | 3504.00 | 0.05 | 0.26 | 0.09 | 1.06 | 3.07 | 0.01 | 0.00 | 4.51 | 4.70 | 4.73 | 4.83 | 4.54 |
| 32 | 1 | 4096.00 | 0.05 | 0.26 | 0.08 | 1.97 | 5.43 | 0.02 | 0.00 | 7.76 | 7.99 | 8.02 | 8.08 | 7.81 |
| 64 | 1 | 4480.00 | 0.05 | 0.27 | 0.09 | 3.82 | 10.03 | 0.02 | 0.00 | 14.25 | 14.46 | 14.51 | 14.56 | 14.28 |
</details>
#### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td>
</tr>
<tr>
<td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 1 | 1478.00 | 0.02 | 0.08 | 0.02 | 0.10 | 0.46 | 0.00 | 0.00 | 0.66 | 0.68 | 0.70 | 0.85 | 0.67 |
| 2 | 1 | 2668.00 | 0.02 | 0.06 | 0.02 | 0.14 | 0.51 | 0.00 | 0.00 | 0.75 | 0.76 | 0.77 | 0.77 | 0.74 |
| 4 | 1 | 4092.00 | 0.02 | 0.07 | 0.02 | 0.20 | 0.66 | 0.00 | 0.00 | 0.97 | 0.98 | 0.99 | 1.13 | 0.97 |
| 8 | 1 | 5936.00 | 0.02 | 0.06 | 0.02 | 0.34 | 0.91 | 0.00 | 0.00 | 1.33 | 1.36 | 1.41 | 1.57 | 1.34 |
| 16 | 1 | 7008.00 | 0.02 | 0.07 | 0.02 | 0.64 | 1.52 | 0.01 | 0.00 | 2.27 | 2.33 | 2.38 | 2.54 | 2.28 |
| 32 | 1 | 7072.00 | 0.02 | 0.12 | 0.02 | 1.47 | 2.84 | 0.03 | 0.00 | 4.49 | 4.59 | 4.66 | 4.89 | 4.51 |
| 64 | 1 | 7680.00 | 0.02 | 0.13 | 0.02 | 2.95 | 5.12 | 0.04 | 0.00 | 8.27 | 8.42 | 8.53 | 8.74 | 8.29 |
</details>
### Online scenario
The online scenario assumes the client and server are located on different hosts. The tests use the following setup (a minimal Perf Analyzer invocation for this scenario is sketched below):
- tensors are passed over HTTP from the client to the server
- concurrent requests are sent from the client to the server, and the final batch is created on the server side
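The sketch below shows roughly how the corresponding online sweep could be launched directly with Perf Analyzer. The model name and server host are placeholders; the concurrency range mirrors the 8-256 sweep used in the results below.
```shell
# Hedged sketch: MODEL_NAME and the server host are placeholders.
perf_analyzer \
    -m ${MODEL_NAME} \
    -u <server-host>:8000 -i http \
    -b 1 \
    --concurrency-range 8:256:8
```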
#### Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX-1 (1x V100 32GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 1225.00 | 0.10 | 1.04 | 3.52 | 0.24 | 1.60 | 0.01 | 0.00 | 6.72 | 7.36 | 7.59 | 7.78 | 6.52 |
| 1 | 16 | 1658.00 | 0.11 | 2.18 | 4.45 | 0.69 | 2.16 | 0.02 | 0.00 | 9.34 | 13.55 | 13.97 | 14.62 | 9.61 |
| 1 | 24 | 1987.00 | 0.12 | 3.02 | 5.29 | 0.99 | 2.53 | 0.02 | 0.00 | 11.90 | 15.62 | 16.94 | 19.39 | 11.96 |
| 1 | 32 | 2208.00 | 0.12 | 3.73 | 6.15 | 1.39 | 2.93 | 0.02 | 0.00 | 14.02 | 20.91 | 21.83 | 22.93 | 14.34 |
| 1 | 40 | 2368.00 | 0.14 | 5.38 | 6.05 | 1.88 | 3.28 | 0.03 | 0.00 | 17.98 | 22.21 | 22.74 | 23.55 | 16.75 |
| 1 | 48 | 2368.00 | 0.18 | 8.29 | 6.44 | 1.85 | 3.25 | 0.03 | 0.00 | 21.42 | 27.00 | 28.50 | 30.02 | 20.03 |
| 1 | 56 | 2509.00 | 0.18 | 7.99 | 7.28 | 2.62 | 3.76 | 0.04 | 0.00 | 23.58 | 29.22 | 30.09 | 31.43 | 21.86 |
| 1 | 64 | 2674.00 | 0.20 | 8.42 | 8.53 | 2.72 | 3.82 | 0.04 | 0.00 | 25.18 | 31.21 | 33.56 | 43.09 | 23.73 |
| 1 | 72 | 2688.00 | 0.20 | 10.05 | 8.97 | 3.09 | 4.07 | 0.04 | 0.00 | 27.59 | 35.28 | 37.35 | 40.03 | 26.44 |
| 1 | 80 | 2610.00 | 0.22 | 11.76 | 9.11 | 4.50 | 4.65 | 0.06 | 0.00 | 31.73 | 42.18 | 44.10 | 45.90 | 30.30 |
| 1 | 88 | 2573.00 | 0.22 | 10.54 | 9.94 | 7.01 | 5.78 | 0.08 | 0.00 | 41.51 | 44.72 | 45.70 | 49.03 | 33.58 |
| 1 | 96 | 2815.00 | 0.23 | 12.18 | 11.07 | 4.94 | 5.09 | 0.06 | 0.00 | 34.73 | 44.71 | 48.38 | 56.71 | 33.58 |
| 1 | 104 | 2732.00 | 0.25 | 11.90 | 12.11 | 7.01 | 6.00 | 0.08 | 0.00 | 38.49 | 51.49 | 54.39 | 58.54 | 37.36 |
| 1 | 112 | 2869.00 | 0.26 | 11.69 | 13.93 | 6.49 | 5.68 | 0.08 | 0.00 | 37.86 | 50.94 | 55.40 | 64.80 | 38.11 |
| 1 | 120 | 2958.00 | 0.26 | 12.24 | 13.02 | 7.48 | 6.78 | 0.10 | 0.00 | 42.24 | 54.54 | 57.09 | 59.88 | 39.87 |
| 1 | 128 | 2990.00 | 0.24 | 14.14 | 14.39 | 6.59 | 6.35 | 0.09 | 0.00 | 43.49 | 54.44 | 58.77 | 70.31 | 41.80 |
| 1 | 136 | 2989.00 | 0.28 | 15.34 | 15.02 | 7.03 | 6.80 | 0.10 | 0.00 | 45.64 | 59.21 | 62.02 | 65.34 | 44.59 |
| 1 | 144 | 2989.00 | 0.27 | 16.48 | 15.56 | 8.41 | 6.72 | 0.10 | 0.00 | 48.12 | 65.24 | 67.14 | 70.71 | 47.54 |
| 1 | 152 | 2964.00 | 0.27 | 16.89 | 17.22 | 8.32 | 7.00 | 0.10 | 0.00 | 50.68 | 68.96 | 73.71 | 80.31 | 49.80 |
| 1 | 160 | 3026.00 | 0.27 | 16.01 | 18.03 | 9.76 | 7.50 | 0.13 | 0.00 | 52.67 | 66.81 | 68.20 | 74.10 | 51.69 |
| 1 | 168 | 3113.89 | 0.29 | 16.40 | 17.93 | 9.34 | 8.39 | 0.13 | 0.00 | 53.71 | 68.57 | 70.61 | 73.08 | 52.48 |
| 1 | 176 | 3194.00 | 0.35 | 15.40 | 19.42 | 9.78 | 8.63 | 0.13 | 0.00 | 54.05 | 70.48 | 73.20 | 77.89 | 53.72 |
| 1 | 184 | 3246.00 | 0.31 | 17.21 | 19.43 | 9.39 | 8.46 | 0.13 | 0.00 | 56.10 | 70.56 | 75.07 | 79.31 | 54.94 |
| 1 | 192 | 3165.00 | 0.32 | 18.71 | 19.74 | 10.00 | 9.01 | 0.15 | 0.00 | 59.04 | 71.28 | 73.31 | 77.61 | 57.92 |
| 1 | 200 | 3230.00 | 0.28 | 21.48 | 18.28 | 10.50 | 9.30 | 0.16 | 0.00 | 61.72 | 74.04 | 75.61 | 81.67 | 59.99 |
| 1 | 208 | 3268.00 | 0.32 | 18.42 | 23.43 | 9.82 | 8.61 | 0.14 | 0.00 | 61.70 | 75.20 | 79.59 | 84.76 | 60.73 |
| 1 | 216 | 3263.00 | 0.32 | 19.63 | 23.60 | 11.11 | 9.59 | 0.15 | 0.00 | 65.28 | 80.60 | 85.08 | 91.09 | 64.41 |
| 1 | 224 | 3145.00 | 0.36 | 21.09 | 23.86 | 13.06 | 10.67 | 0.16 | 0.00 | 72.96 | 83.93 | 86.35 | 92.58 | 69.20 |
| 1 | 232 | 3148.00 | 0.36 | 22.02 | 24.26 | 12.64 | 11.42 | 0.17 | 0.00 | 75.53 | 84.75 | 87.35 | 94.60 | 70.87 |
| 1 | 240 | 3342.00 | 0.49 | 16.67 | 29.95 | 11.96 | 10.46 | 0.17 | 0.00 | 70.85 | 87.04 | 90.95 | 95.84 | 69.70 |
| 1 | 248 | 3357.00 | 0.32 | 27.51 | 22.90 | 10.18 | 9.23 | 0.15 | 0.00 | 71.50 | 86.61 | 94.11 | 103.46 | 70.30 |
| 1 | 256 | 3361.00 | 0.42 | 22.20 | 28.57 | 11.95 | 10.67 | 0.16 | 0.00 | 76.68 | 87.06 | 89.44 | 96.00 | 73.98 |
</details>
#### Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16
Our results were obtained using the following configuration:
| Parameter Name | Parameter Value |
|:-----------------------------|:-----------------------------|
| GPU |NVIDIA DGX A100 (1x A100 80GB) |
| Backend |ONNX Runtime |
| Backend accelerator |NVIDIA TensorRT|
| Precision |FP16 |
| Model format |ONNX |
| Max batch size |64 |
| Number of model instances |2|
| Export Format | ONNX |
| Device Kind | gpu |
| Torch Jit | none |
<table>
<tbody>
<tr>
<td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td>
</tr>
</tbody>
</table>
<details>
<summary>Results Table</summary>
| Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) |
|--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 8 | 1864.00 | 0.07 | 0.71 | 2.34 | 0.16 | 0.99 | 0.01 | 0.00 | 4.25 | 4.77 | 4.94 | 5.20 | 4.28 |
| 1 | 16 | 2607.39 | 0.08 | 1.51 | 2.90 | 0.40 | 1.22 | 0.01 | 0.00 | 6.08 | 7.60 | 8.03 | 8.95 | 6.12 |
| 1 | 24 | 2997.00 | 0.09 | 2.54 | 3.25 | 0.72 | 1.37 | 0.02 | 0.00 | 7.97 | 10.57 | 11.18 | 13.21 | 7.99 |
| 1 | 32 | 3276.00 | 0.11 | 3.52 | 3.40 | 1.14 | 1.52 | 0.03 | 0.00 | 10.15 | 13.06 | 13.59 | 14.38 | 9.72 |
| 1 | 40 | 3445.00 | 0.11 | 4.50 | 3.63 | 1.51 | 1.73 | 0.04 | 0.00 | 12.63 | 15.10 | 15.51 | 16.41 | 11.52 |
| 1 | 48 | 3608.00 | 0.11 | 5.86 | 3.97 | 1.51 | 1.71 | 0.04 | 0.00 | 13.68 | 16.60 | 17.32 | 18.51 | 13.19 |
| 1 | 56 | 3821.00 | 0.11 | 6.07 | 4.35 | 1.99 | 1.98 | 0.05 | 0.00 | 15.77 | 19.19 | 19.82 | 20.99 | 14.55 |
| 1 | 64 | 4070.00 | 0.10 | 6.20 | 4.78 | 2.33 | 2.16 | 0.05 | 0.00 | 16.65 | 20.56 | 21.78 | 23.89 | 15.62 |
| 1 | 72 | 4187.00 | 0.12 | 5.98 | 5.92 | 2.62 | 2.33 | 0.06 | 0.00 | 18.07 | 22.57 | 23.69 | 25.99 | 17.02 |
| 1 | 80 | 4329.00 | 0.10 | 6.75 | 5.95 | 2.85 | 2.47 | 0.07 | 0.00 | 19.08 | 23.93 | 25.12 | 26.30 | 18.19 |
| 1 | 88 | 4474.00 | 0.12 | 6.88 | 6.68 | 3.18 | 2.57 | 0.07 | 0.00 | 20.14 | 25.30 | 26.72 | 30.18 | 19.49 |
| 1 | 96 | 4590.00 | 0.12 | 8.08 | 6.42 | 3.23 | 2.70 | 0.08 | 0.00 | 21.43 | 26.92 | 28.13 | 32.91 | 20.63 |
| 1 | 104 | 4632.00 | 0.11 | 7.98 | 7.31 | 3.73 | 2.97 | 0.09 | 0.00 | 22.79 | 28.68 | 31.35 | 36.71 | 22.18 |
| 1 | 112 | 4654.00 | 0.10 | 10.48 | 6.84 | 3.39 | 2.81 | 0.08 | 0.00 | 24.41 | 31.16 | 33.22 | 37.03 | 23.70 |
| 1 | 120 | 4929.00 | 0.11 | 9.14 | 8.01 | 3.66 | 3.06 | 0.09 | 0.00 | 24.90 | 31.41 | 32.98 | 39.08 | 24.07 |
| 1 | 128 | 4842.00 | 0.10 | 9.54 | 8.50 | 4.48 | 3.30 | 0.10 | 0.00 | 26.78 | 34.51 | 36.68 | 38.60 | 26.02 |
| 1 | 136 | 4869.00 | 0.10 | 9.87 | 9.05 | 4.83 | 3.48 | 0.11 | 0.00 | 27.87 | 34.78 | 36.79 | 40.60 | 27.45 |
| 1 | 144 | 5155.00 | 0.11 | 9.83 | 9.41 | 4.60 | 3.58 | 0.11 | 0.00 | 28.51 | 36.00 | 37.76 | 41.36 | 27.64 |
| 1 | 152 | 5113.00 | 0.12 | 9.96 | 9.53 | 5.55 | 3.88 | 0.13 | 0.00 | 30.28 | 37.23 | 38.74 | 41.21 | 29.17 |
| 1 | 160 | 5053.00 | 0.11 | 11.25 | 10.37 | 5.44 | 3.82 | 0.13 | 0.00 | 32.03 | 40.37 | 43.19 | 45.75 | 31.12 |
| 1 | 168 | 5018.00 | 0.12 | 11.42 | 11.14 | 6.20 | 4.00 | 0.14 | 0.00 | 33.98 | 42.41 | 45.32 | 48.52 | 33.01 |
| 1 | 176 | 5146.00 | 0.12 | 11.42 | 11.63 | 6.05 | 4.10 | 0.14 | 0.00 | 34.48 | 43.39 | 45.25 | 50.67 | 33.46 |
| 1 | 184 | 4805.00 | 0.12 | 18.49 | 10.25 | 4.99 | 3.40 | 0.11 | 0.00 | 32.61 | 58.79 | 62.32 | 67.53 | 37.36 |
| 1 | 192 | 5458.00 | 0.13 | 10.60 | 11.73 | 6.86 | 4.87 | 0.16 | 0.00 | 36.11 | 42.32 | 43.57 | 45.46 | 34.36 |
| 1 | 200 | 5095.00 | 0.15 | 11.19 | 14.90 | 7.52 | 4.58 | 0.15 | 0.00 | 38.94 | 48.22 | 50.25 | 54.12 | 38.49 |
| 1 | 208 | 5470.00 | 0.10 | 12.16 | 12.25 | 7.59 | 4.97 | 0.16 | 0.00 | 38.11 | 45.97 | 46.42 | 48.32 | 37.23 |
| 1 | 216 | 5382.00 | 0.11 | 13.92 | 13.65 | 6.74 | 4.49 | 0.14 | 0.00 | 39.30 | 50.41 | 53.34 | 58.88 | 39.06 |
| 1 | 224 | 5478.00 | 0.11 | 13.06 | 15.09 | 6.65 | 4.43 | 0.15 | 0.00 | 39.40 | 50.39 | 53.51 | 57.37 | 39.49 |
| 1 | 232 | 5385.00 | 0.11 | 13.58 | 13.64 | 8.54 | 6.00 | 0.18 | 0.00 | 43.78 | 50.20 | 51.78 | 55.14 | 42.04 |
| 1 | 240 | 5519.00 | 0.12 | 11.83 | 17.19 | 7.90 | 5.36 | 0.17 | 0.00 | 43.49 | 51.74 | 54.30 | 59.48 | 42.57 |
| 1 | 248 | 5422.00 | 0.12 | 14.23 | 16.04 | 8.82 | 5.56 | 0.18 | 0.00 | 46.15 | 53.49 | 56.08 | 59.57 | 44.95 |
| 1 | 256 | 5215.00 | 0.10 | 22.93 | 12.82 | 7.06 | 4.52 | 0.15 | 0.00 | 41.19 | 76.05 | 83.77 | 91.88 | 47.58 |
</details>
## Advanced
| Inference runtime | Mnemonic used in scripts |
|-------------------|--------------------------|
| [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` |
| [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` |
| [ONNX](https://onnx.ai) | `onnx` |
| [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |
### Step by step deployment process
Commands described below can be used for exporting, converting and profiling the model.
#### Clone Repository
IMPORTANT: This step is executed on the host computer.
<details>
<summary>Clone Repository Command</summary>
```shell
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd PyTorch/Classification/GPUNet
```
</details>
#### Start Triton Inference Server
Setup the environment in the host computer and start Triton Inference Server.
<details>
<summary>Setup Environment and Start Triton Inference Server Command</summary>
```shell
source ./triton/scripts/setup_environment.sh
./triton/scripts/docker/triton_inference_server.sh
```
</details>
#### Prepare Dataset
Please follow the data download instructions in the [Main QSG](../../README.md#prepare-the-dataset).
#### Prepare Checkpoint
Please download a checkpoint from [here](https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_0_pyt_ckpt/versions/21.12.0_amp/zip)
and place it in `runner_workspace/checkpoints/0.65ms/`. Note that the `0.65ms` subdirectory may not be created yet.
#### Setup Container
Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies.
<details>
<summary>Setup Container Command</summary>
Build container:
```shell
./triton/scripts/docker/build.sh
```
Run container in interactive mode:
```shell
./triton/scripts/docker/interactive.sh /path/to/imagenet/val/
```
Setup environment in order to share artifacts in steps and with Triton Inference Server:
```shell
source ./triton/scripts/setup_environment.sh
```
</details>
#### Prepare configuration
You can use the environment variables to set the parameters of your inference configuration.
Example values of some key variables in one configuration:
<details>
<summary>Export Variables</summary>
```shell
export FORMAT="onnx"
export PRECISION="fp16"
export EXPORT_FORMAT="onnx"
export EXPORT_PRECISION="fp16"
export BACKEND_ACCELERATOR="trt"
export NUMBER_OF_MODEL_INSTANCES="2"
export TENSORRT_CAPTURE_CUDA_GRAPH="0"
export CHECKPOINT="0.65ms"
export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT}
```
</details>
#### Export Model
Export the model from Python source to the desired format (e.g., SavedModel or TorchScript).
<details>
<summary>Export Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit none \
\
--config /workspace/gpunet/configs/batch1/GV100/0.65ms.json \
--checkpoint ${CHECKPOINT_DIR}/0.65ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
```
</details>
#### Convert Model
Convert the model from training to inference format (e.g. TensorRT).
<details>
<summary>Convert Model Command</summary>
```shell
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size 64 \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
```
</details>
#### Deploy Model
Configure the model on Triton Inference Server.
Generate the configuration from your model repository.
<details>
<summary>Deploy Model Command</summary>
```shell
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size 64 \
--batching dynamic \
--preferred-batch-sizes 64 \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
```
</details>
#### Triton Performance Offline Test
We want to maximize throughput. It assumes that your data is already available
for inference, or that your requests quickly saturate the maximum batch size.
Triton Inference Server supports offline scenarios with static batching, where
inference requests are served as they are received. The largest throughput
improvements come from increasing the batch size, because the GPU is used more
efficiently with larger batches.
<details>
<summary>Triton Performance Offline Test Command</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
```
</details>
#### Triton Performance Online Test
We want to maximize throughput within latency budget constraints.
Dynamic batching is a feature of Triton Inference Server that allows
inference requests to be combined by the server, so that a batch is
created dynamically, resulting in a reduced average latency.
<details>
<summary>Triton Performance Online Test</summary>
```shell
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
```
</details>
### Latency explanation
A typical Triton Inference Server pipeline can be broken down into the following steps:
1. The client serializes the inference request into a message and sends it to
the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to
the client (Server Send).
7. The completed message then travels over the network from the server
to the client (Network).
8. The completed message is deserialized by the client and processed as
a completed inference request (Client Receive).
Generally, for local clients, steps 1-4 and 6-8 occupy only a small fraction of time
compared to step 5. In distributed systems and online processing, where the client and
server are connected through a network, the send and receive steps might have a noticeable
impact on overall processing performance. To help analyze possible bottlenecks, detailed
charts are presented for the online scenario cases.
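As a rough cross-check, the reported average latency is approximately the sum of these per-step components. For example, in the offline NVIDIA DGX-1 results above for batch size 1, the components add up to 0.05 + 0.25 + 0.09 + 0.15 + 0.63 + 0.01 + 0.00 ≈ 1.18 ms, matching the reported average latency of 1.18 ms.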
## Release Notes
We’re constantly refining and improving our performance on AI
and HPC workloads even on the same hardware with frequent updates
to our software stack. For our latest performance data refer
to these pages for
[AI](https://developer.nvidia.com/deep-learning-performance-training-inference)
and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
May 2022
- Initial release
### Known issues
- There are no known issues with this model. |
TensorFlow/Segmentation/UNet_3D_Medical/runtime | runtime | parse_results | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Parsing of results"""
import os
import argparse
def parse_convergence_results(path, environment):
""" Parse convergence results utility
:param path: Path to results
:param environment: System environment
"""
whole_tumor = []
tumor_core = []
peritumoral_edema = []
enhancing_tumor = []
mean_dice = []
logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
if not logfiles:
raise FileNotFoundError("No logfile found at {}".format(path))
for logfile in logfiles:
with open(os.path.join(path, logfile), "r") as file_item:
content = file_item.readlines()
if "tumor_core" not in content[-1]:
print("Evaluation score not found. The file", logfile, "might be corrupted.")
continue
content = content[-1].split("()")[1]
whole_tumor.append(float([val for val in content.split(" ")
if "whole_tumor" in val][0].split()[-1]))
tumor_core.append(float([val for val in content.split(" ")
if "tumor_core" in val][0].split()[-1]))
peritumoral_edema.append(float([val for val in content.split(" ")
if "peritumoral_edema" in val][0].split()[-1]))
enhancing_tumor.append(float([val for val in content.split(" ")
if "enhancing_tumor" in val][0].split()[-1]))
mean_dice.append(float([val for val in content.split(" ")
if "mean_dice" in val][0].split()[-1]))
if whole_tumor:
print("Evaluation average dice score:", sum(mean_dice) / len(mean_dice))
print("Evaluation whole tumor dice score:", sum(whole_tumor) / len(whole_tumor))
print("Evaluation tumor core dice score:", sum(tumor_core) / len(tumor_core))
print("Evaluation peritumoral edema dice score:", sum(peritumoral_edema) / len(peritumoral_edema))
print("Evaluation enhancing tumor dice score:", sum(enhancing_tumor) / len(enhancing_tumor))
else:
print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir',
type=str,
required=True)
parser.add_argument('--env',
type=str,
required=True)
args = parser.parse_args()
parse_convergence_results(path=args.model_dir, environment=args.env)
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | export_tflite_ssd_graph | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exports an SSD detection model to use with tf-lite.
Outputs file:
* A tflite compatible frozen graph - $output_directory/tflite_graph.pb
The exported graph has the following input and output nodes.
Inputs:
'normalized_input_image_tensor': a float32 tensor of shape
[1, height, width, 3] containing the normalized input image. Note that the
height and width must be compatible with the height and width configured in
the fixed_shape_image resizer options in the pipeline config proto.
In floating point Mobilenet model, 'normalized_image_tensor' has values
between [-1,1). This typically means mapping each pixel (linearly)
to a value between [-1, 1]. Input image
values between 0 and 255 are scaled by (1/128.0) and then a value of
-1 is added to them to ensure the range is [-1,1).
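For example, under this scheme a pixel value of 0 maps to 0 * (1/128.0) - 1 = -1.0 and a
pixel value of 255 maps to 255 * (1/128.0) - 1 ~= 0.99, keeping inputs within [-1, 1).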
In quantized Mobilenet model, 'normalized_image_tensor' has values between [0,
255].
In general, see the `preprocess` function defined in the feature extractor class
in the object_detection/models directory.
Outputs:
If add_postprocessing_op is true: frozen graph adds a
TFLite_Detection_PostProcess custom op node has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected boxes
else:
the graph has two outputs:
'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
containing the encoded box predictions.
'raw_outputs/class_predictions': a float32 tensor of shape
[1, num_anchors, num_classes] containing the class scores for each anchor
after applying score conversion.
Example Usage:
--------------
python object_detection/export_tflite_ssd_graph \
--pipeline_config_path path/to/ssd_mobilenet.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
The expected output would be in the directory
path/to/exported_model_directory (which is created if it does not exist)
with contents:
- tflite_graph.pbtxt
- tflite_graph.pb
Config overrides (see the `config_override` flag) are text protobufs
(also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override
certain fields in the provided pipeline_config_path. These are useful for
making small changes to the inference graph that differ from the training or
eval config.
Example Usage (in which we change the NMS iou_threshold to be 0.5 and
NMS score_threshold to be 0.0):
python object_detection/export_tflite_ssd_graph \
--pipeline_config_path path/to/ssd_mobilenet.config \
--trained_checkpoint_prefix path/to/model.ckpt \
--output_directory path/to/exported_model_directory
--config_override " \
model{ \
ssd{ \
post_processing { \
batch_non_max_suppression { \
score_threshold: 0.0 \
iou_threshold: 0.5 \
} \
} \
} \
} \
"
"""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import export_tflite_ssd_graph_lib
from object_detection.protos import pipeline_pb2
flags = tf.app.flags
flags.DEFINE_string('output_directory', None, 'Path to write outputs.')
flags.DEFINE_string(
'pipeline_config_path', None,
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file.')
flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.')
flags.DEFINE_integer('max_detections', 10,
'Maximum number of detections (boxes) to show.')
flags.DEFINE_integer('max_classes_per_detection', 1,
'Number of classes to display per detection box.')
flags.DEFINE_integer(
'detections_per_class', 100,
'Number of anchors used per class in Regular Non-Max-Suppression.')
flags.DEFINE_bool('add_postprocessing_op', True,
'Add TFLite custom op for postprocessing to the graph.')
flags.DEFINE_bool(
'use_regular_nms', False,
'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.')
flags.DEFINE_string(
'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig '
'text proto to override pipeline_config_path.')
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
flags.mark_flag_as_required('output_directory')
flags.mark_flag_as_required('pipeline_config_path')
flags.mark_flag_as_required('trained_checkpoint_prefix')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
text_format.Merge(FLAGS.config_override, pipeline_config)
export_tflite_ssd_graph_lib.export_tflite_graph(
pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory,
FLAGS.add_postprocessing_op, FLAGS.max_detections,
FLAGS.max_classes_per_detection, FLAGS.use_regular_nms)
if __name__ == '__main__':
tf.app.run(main)
|
TensorFlow2/LanguageModeling/BERT/official/utils/logs | logs | cloud_lib | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that interact with cloud service.
"""
import requests
GCP_METADATA_URL = "http://metadata/computeMetadata/v1/instance/hostname"
GCP_METADATA_HEADER = {"Metadata-Flavor": "Google"}
def on_gcp():
"""Detect whether the current running environment is on GCP."""
try:
    # Timeout in 5 seconds, in case the test environment has connectivity issues.
    # There is no default timeout, which means the call might block forever.
response = requests.get(
GCP_METADATA_URL, headers=GCP_METADATA_HEADER, timeout=5)
return response.status_code == 200
except requests.exceptions.RequestException:
return False
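# A minimal usage sketch (hypothetical caller code, not part of this module):
#
#   if on_gcp():
#       print("Running on a GCP instance")
#   else:
#       print("Not on GCP, or the metadata server was unreachable")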
|
PyTorch/Classification/ConvNets | ConvNets | main | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
os.environ[
"KMP_AFFINITY"
] = "disabled" # We need to do this before importing anything else as a workaround for this bug: https://github.com/pytorch/pytorch/issues/28389
import argparse
import random
from copy import deepcopy
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import image_classification.logger as log
from image_classification.smoothing import LabelSmoothing
from image_classification.mixup import NLLMultiLabelSmooth, MixUpWrapper
from image_classification.dataloaders import *
from image_classification.training import *
from image_classification.utils import *
from image_classification.models import (
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
)
from image_classification.optimizers import (
get_optimizer,
lr_cosine_policy,
lr_linear_policy,
lr_step_policy,
)
from image_classification.gpu_affinity import set_affinity, AffinityMode
import dllogger
def available_models():
models = {
m.name: m
for m in [
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
]
}
return models
def add_parser_arguments(parser, skip_arch=False):
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"--data-backend",
metavar="BACKEND",
default="dali-cpu",
choices=DATA_BACKEND_CHOICES,
help="data backend: "
+ " | ".join(DATA_BACKEND_CHOICES)
+ " (default: dali-cpu)",
)
parser.add_argument(
"--interpolation",
metavar="INTERPOLATION",
default="bilinear",
help="interpolation type for resizing images: bilinear, bicubic or triangular(DALI only)",
)
if not skip_arch:
model_names = available_models().keys()
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=5,
type=int,
metavar="N",
help="number of data loading workers (default: 5)",
)
parser.add_argument(
"--prefetch",
default=2,
type=int,
metavar="N",
help="number of samples prefetched by each loader",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--run-epochs",
default=-1,
type=int,
metavar="N",
help="run only N epochs, used for checkpointing runs",
)
parser.add_argument(
"--early-stopping-patience",
default=-1,
type=int,
metavar="N",
help="early stopping after N epochs without validation accuracy improving",
)
parser.add_argument(
"--image-size", default=None, type=int, help="resolution of image"
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256) per gpu",
)
parser.add_argument(
"--optimizer-batch-size",
default=-1,
type=int,
metavar="N",
help="size of a total batch size, for simulating bigger batches using gradient accumulation",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"--lr-schedule",
default="step",
type=str,
metavar="SCHEDULE",
choices=["step", "linear", "cosine"],
help="Type of LR schedule: {}, {}, {}".format("step", "linear", "cosine"),
)
parser.add_argument("--end-lr", default=0, type=float)
parser.add_argument(
"--warmup", default=0, type=int, metavar="E", help="number of warmup epochs"
)
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
metavar="S",
help="label smoothing",
)
parser.add_argument(
"--mixup", default=0.0, type=float, metavar="ALPHA", help="mixup alpha"
)
parser.add_argument(
"--optimizer", default="sgd", type=str, choices=("sgd", "rmsprop")
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--weight-decay",
"--wd",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
)
parser.add_argument(
"--bn-weight-decay",
action="store_true",
help="use weight_decay on batch normalization learnable parameters, (default: false)",
)
parser.add_argument(
"--rmsprop-alpha",
default=0.9,
type=float,
help="value of alpha parameter in rmsprop optimizer (default: 0.9)",
)
parser.add_argument(
"--rmsprop-eps",
default=1e-3,
type=float,
help="value of eps parameter in rmsprop optimizer (default: 1e-3)",
)
parser.add_argument(
"--nesterov",
action="store_true",
help="use nesterov momentum, (default: false)",
)
parser.add_argument(
"--print-freq",
"-p",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default=None,
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--static-loss-scale",
type=float,
default=1,
help="Static loss scale, positive power of 2 values can improve amp convergence.",
)
parser.add_argument(
"--prof", type=int, default=-1, metavar="N", help="Run only N iterations"
)
parser.add_argument(
"--amp",
action="store_true",
help="Run model AMP (automatic mixed precision) mode.",
)
parser.add_argument(
"--seed", default=None, type=int, help="random seed used for numpy and pytorch"
)
parser.add_argument(
"--gather-checkpoints",
default="0",
type=int,
help=(
"Gather N last checkpoints throughout the training,"
" without this flag only best and last checkpoints will be stored. "
"Use -1 for all checkpoints"
),
)
parser.add_argument(
"--raport-file",
default="experiment_raport.json",
type=str,
help="file in which to store JSON experiment raport",
)
parser.add_argument(
"--evaluate", action="store_true", help="evaluate checkpoint/model"
)
parser.add_argument("--training-only", action="store_true", help="do not evaluate")
parser.add_argument(
"--no-checkpoints",
action="store_false",
dest="save_checkpoints",
help="do not store any checkpoints, useful for benchmarking",
)
parser.add_argument(
"--jit",
type=str,
default="no",
choices=["no", "script"],
help="no -> do not use torch.jit; script -> use torch.jit.script",
)
parser.add_argument("--checkpoint-filename", default="checkpoint.pth.tar", type=str)
parser.add_argument(
"--workspace",
type=str,
default="./",
metavar="DIR",
help="path to directory where checkpoints will be stored",
)
parser.add_argument(
"--memory-format",
type=str,
default="nchw",
choices=["nchw", "nhwc"],
help="memory layout, nchw or nhwc",
)
parser.add_argument("--use-ema", default=None, type=float, help="use EMA")
parser.add_argument(
"--augmentation",
type=str,
default=None,
choices=[None, "autoaugment"],
help="augmentation method",
)
parser.add_argument(
"--gpu-affinity",
type=str,
default="none",
required=False,
choices=[am.name for am in AffinityMode],
)
parser.add_argument(
"--topk",
type=int,
default=5,
required=False,
)
def prepare_for_training(args, model_args, model_arch):
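    """Build everything needed for a training run.
    Sets up distributed training, GPU affinity and seeding, optionally resumes
    from a checkpoint, then constructs the loss, model executor, data loaders,
    logger, optimizer, LR policy and Trainer, and returns the tuple consumed by
    main()/train_loop().
    """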
args.distributed = False
if "WORLD_SIZE" in os.environ:
args.distributed = int(os.environ["WORLD_SIZE"]) > 1
args.local_rank = int(os.environ["LOCAL_RANK"])
else:
args.local_rank = 0
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend="nccl", init_method="env://")
args.world_size = torch.distributed.get_world_size()
affinity = set_affinity(args.gpu, mode=args.gpu_affinity)
print(f"Training process {args.local_rank} affinity: {affinity}")
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
np.random.seed(seed=args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
def _worker_init_fn(id):
# Worker process should inherit its affinity from parent
affinity = os.sched_getaffinity(0)
print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")
np.random.seed(seed=args.seed + args.local_rank + id)
random.seed(args.seed + args.local_rank + id)
else:
def _worker_init_fn(id):
# Worker process should inherit its affinity from parent
affinity = os.sched_getaffinity(0)
print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")
if args.static_loss_scale != 1.0:
if not args.amp:
print("Warning: if --amp is not used, static_loss_scale will be ignored.")
if args.optimizer_batch_size < 0:
batch_size_multiplier = 1
else:
tbs = args.world_size * args.batch_size
if args.optimizer_batch_size % tbs != 0:
print(
"Warning: simulated batch size {} is not divisible by actual batch size {}".format(
args.optimizer_batch_size, tbs
)
)
batch_size_multiplier = int(args.optimizer_batch_size / tbs)
print("BSM: {}".format(batch_size_multiplier))
start_epoch = 0
best_prec1 = 0
# optionally resume from a checkpoint
if args.resume is not None:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(
args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu)
)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
            model_state = checkpoint["state_dict"]
            optimizer_state = checkpoint["optimizer"]
            # Use .get() so model_state_ema is always defined, even for
            # checkpoints saved without EMA weights.
            model_state_ema = checkpoint.get("state_dict_ema", None)
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
if start_epoch >= args.epochs:
print(
f"Launched training for {args.epochs}, checkpoint already run {start_epoch}"
)
exit(1)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
model_state = None
model_state_ema = None
optimizer_state = None
else:
model_state = None
model_state_ema = None
optimizer_state = None
loss = nn.CrossEntropyLoss
if args.mixup > 0.0:
loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
loss = lambda: LabelSmoothing(args.label_smoothing)
memory_format = (
torch.channels_last if args.memory_format == "nhwc" else torch.contiguous_format
)
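    # In distributed mode only rank 0 keeps pretrained=True, so the weights are
    # downloaded once; the other ranks are expected to receive the parameters
    # when the model is later wrapped for distributed training.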
model = model_arch(
**{
k: v
if k != "pretrained"
else v and (not args.distributed or dist.get_rank() == 0)
for k, v in model_args.__dict__.items()
}
)
image_size = (
args.image_size
if args.image_size is not None
else model.arch.default_image_size
)
scaler = torch.cuda.amp.GradScaler(
init_scale=args.static_loss_scale,
growth_factor=2,
backoff_factor=0.5,
growth_interval=100,
enabled=args.amp,
)
executor = Executor(
model,
loss(),
cuda=True,
memory_format=memory_format,
amp=args.amp,
scaler=scaler,
divide_loss=batch_size_multiplier,
ts_script=args.jit == "script",
)
# Create data loaders and optimizers as needed
if args.data_backend == "pytorch":
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == "dali-gpu":
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == "dali-cpu":
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
elif args.data_backend == "synthetic":
get_val_loader = get_synthetic_loader
get_train_loader = get_synthetic_loader
else:
print("Bad databackend picked")
exit(1)
train_loader, train_loader_len = get_train_loader(
args.data,
image_size,
args.batch_size,
model_args.num_classes,
args.mixup > 0.0,
interpolation=args.interpolation,
augmentation=args.augmentation,
start_epoch=start_epoch,
workers=args.workers,
_worker_init_fn=_worker_init_fn,
memory_format=memory_format,
prefetch_factor=args.prefetch,
)
if args.mixup != 0.0:
train_loader = MixUpWrapper(args.mixup, train_loader)
val_loader, val_loader_len = get_val_loader(
args.data,
image_size,
args.batch_size,
model_args.num_classes,
False,
interpolation=args.interpolation,
workers=args.workers,
_worker_init_fn=_worker_init_fn,
memory_format=memory_format,
prefetch_factor=args.prefetch,
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger = log.Logger(
args.print_freq,
[
dllogger.StdOutBackend(
dllogger.Verbosity.DEFAULT, step_format=log.format_step
),
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
os.path.join(args.workspace, args.raport_file),
),
],
start_epoch=start_epoch - 1,
)
else:
logger = log.Logger(args.print_freq, [], start_epoch=start_epoch - 1)
logger.log_parameter(args.__dict__, verbosity=dllogger.Verbosity.DEFAULT)
logger.log_parameter(
{f"model.{k}": v for k, v in model_args.__dict__.items()},
verbosity=dllogger.Verbosity.DEFAULT,
)
optimizer = get_optimizer(
list(executor.model.named_parameters()),
args.lr,
args=args,
state=optimizer_state,
)
if args.lr_schedule == "step":
lr_policy = lr_step_policy(args.lr, [30, 60, 80], 0.1, args.warmup)
elif args.lr_schedule == "cosine":
lr_policy = lr_cosine_policy(
args.lr, args.warmup, args.epochs, end_lr=args.end_lr
)
elif args.lr_schedule == "linear":
lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs)
if args.distributed:
executor.distributed(args.gpu)
if model_state is not None:
executor.model.load_state_dict(model_state)
trainer = Trainer(
executor,
optimizer,
grad_acc_steps=batch_size_multiplier,
ema=args.use_ema,
)
if (args.use_ema is not None) and (model_state_ema is not None):
trainer.ema_executor.model.load_state_dict(model_state_ema)
return (
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
best_prec1,
)
def main(args, model_args, model_arch):
exp_start_time = time.time()
(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch,
best_prec1,
) = prepare_for_training(args, model_args, model_arch)
train_loop(
trainer,
lr_policy,
train_loader,
train_loader_len,
val_loader,
logger,
start_epoch=start_epoch,
end_epoch=min((start_epoch + args.run_epochs), args.epochs)
if args.run_epochs != -1
else args.epochs,
early_stopping_patience=args.early_stopping_patience,
best_prec1=best_prec1,
prof=args.prof,
skip_training=args.evaluate,
skip_validation=args.training_only,
save_checkpoints=args.save_checkpoints and not args.evaluate,
checkpoint_dir=args.workspace,
checkpoint_filename=args.checkpoint_filename,
keep_last_n_checkpoints=args.gather_checkpoints,
topk=args.topk,
)
exp_duration = time.time() - exp_start_time
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == "__main__":
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
add_parser_arguments(parser)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
print(model_args)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
|