relative_path | section | filename | text
---|---|---|---|
PyTorch/LanguageModeling/BERT | BERT | optimization | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
#from fused_adam_local import FusedAdam
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
from utils import is_main_process
multi_tensor_l2norm = amp_C.multi_tensor_l2norm
lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda
lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda
scale = amp_C.multi_tensor_scale
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
    return max((x - 1.) / (warmup - 1.), 0.)
def warmup_poly(x, warmup=0.002, degree=0.5):
if x < warmup:
return x/warmup
return (1.0 - x)**degree
SCHEDULES = {
'warmup_cosine':warmup_cosine,
'warmup_constant':warmup_constant,
'warmup_linear':warmup_linear,
'warmup_poly':warmup_poly,
}
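# Illustrative sketch (added for clarity, not part of the original file): the
# schedule functions above map training progress x in [0, 1] to a multiplier
# on the base learning rate, ramping up linearly during the warmup fraction
# and then decaying according to the chosen schedule. The warmup value and
# progress points below are arbitrary placeholders.
def _example_schedule_multipliers(warmup=0.1):
    multipliers = {}
    for x in (0.05, 0.1, 0.5, 1.0):
        multipliers[x] = {
            'warmup_linear': warmup_linear(x, warmup),
            'warmup_constant': warmup_constant(x, warmup),
            'warmup_poly': warmup_poly(x, warmup),
        }
    # e.g. warmup_linear: 0.5 at x=0.05 (mid-warmup), 1.0 at x=0.1, 0.0 at x=1.0
    return multipliers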
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adam's beta1 parameter. Default: 0.9
        b2: Adam's beta2 parameter. Default: 0.999
        e: Adam's epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'], error_if_nonfinite=False)
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss
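# Illustrative usage sketch (not part of the original file): how BertAdam is
# typically constructed for fine-tuning. The model object, learning rate and
# warmup fraction are placeholders; only the BertAdam API defined above is
# assumed. Biases and LayerNorm weights are conventionally excluded from
# weight decay.
def _example_build_bert_adam(model, num_train_steps):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    grouped_parameters = [
        {'params': [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    return BertAdam(grouped_parameters,
                    lr=5e-5,
                    warmup=0.1,
                    t_total=num_train_steps,
                    schedule='warmup_linear')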
|
TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular | nvtabular | preproc | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
os.environ["TF_MEMORY_ALLOCATION"] = "0.0"
from data.outbrain.nvtabular.utils.arguments import parse_args
from data.outbrain.nvtabular.utils.setup import create_config
from data.outbrain.nvtabular.utils.workflow import execute_pipeline
from data.outbrain.features import get_outbrain_feature_spec
def is_empty(path):
return not (os.path.exists(path) and (os.path.isfile(path) or os.listdir(path)))
def main():
args = parse_args()
config = create_config(args)
if is_empty(args.metadata_path):
logging.warning(
"Creating parquets into {}".format(config["output_bucket_folder"])
)
execute_pipeline(config)
save_feature_spec(config["output_bucket_folder"])
else:
logging.warning(f"Directory exists {args.metadata_path}")
logging.warning("Skipping NVTabular preprocessing")
def save_feature_spec(base_directory):
feature_spec = get_outbrain_feature_spec(base_directory)
fspec_path = os.path.join(base_directory, 'feature_spec.yaml')
feature_spec.to_yaml(output_path=fspec_path)
if __name__ == "__main__":
main()
|
TensorFlow/Classification/ConvNets/runtime | runtime | runner | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import multiprocessing
import warnings
import tensorflow as tf
import numpy as np
from model import resnet
from utils import hooks
from utils import data_utils
from utils import hvd_wrapper as hvd
from runtime import runner_utils
import dllogger
import random
__all__ = [
'Runner',
]
class Runner(object):
def __init__(
self,
# ========= Model HParams ========= #
n_classes=1001,
architecture='resnet50',
input_format='NHWC', # NCHW or NHWC
compute_format='NCHW', # NCHW or NHWC
dtype=tf.float32, # tf.float32 or tf.float16
n_channels=3,
height=224,
width=224,
distort_colors=False,
model_dir=None,
log_dir=None,
data_dir=None,
data_idx_dir=None,
weight_init="fan_out",
# ======= Optimization HParams ======== #
use_xla=False,
use_tf_amp=False,
use_dali=False,
use_cpu=False,
gpu_memory_fraction=1.0,
gpu_id=0,
# ======== Debug Flags ======== #
debug_verbosity=0,
seed=None):
if dtype not in [tf.float32, tf.float16]:
raise ValueError(
"Unknown dtype received: %s (allowed: `tf.float32` and `tf.float16`)" % dtype)
if compute_format not in ["NHWC", 'NCHW']:
raise ValueError(
"Unknown `compute_format` received: %s (allowed: ['NHWC', 'NCHW'])" % compute_format)
if input_format not in ["NHWC", 'NCHW']:
raise ValueError(
"Unknown `input_format` received: %s (allowed: ['NHWC', 'NCHW'])" % input_format)
if n_channels not in [1, 3]:
raise ValueError(
"Unsupported number of channels: %d (allowed: 1 (grayscale) and 3 (color))" % n_channels)
if seed is not None:
seed = seed * 2 + hvd.rank()
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
# ============================================
        # Optimization Flags - Do not remove
# ============================================
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '2'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_DISABLE_NVTX_RANGES'] = '1'
os.environ["TF_XLA_FLAGS"] = (os.environ.get(
"TF_XLA_FLAGS", "") + " --tf_xla_enable_lazy_compilation=false")
# ============================================
# TF-AMP Setup - Do not remove
# ============================================
if dtype == tf.float16:
if use_tf_amp:
                raise RuntimeError(
                    "TF AMP cannot be activated for FP16 precision")
elif use_tf_amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "0"
# =================================================
        model_hparams = tf.contrib.training.HParams(width=width,
                                                     height=height,
n_channels=n_channels,
n_classes=n_classes,
dtype=dtype,
input_format=input_format,
compute_format=compute_format,
distort_colors=distort_colors,
seed=seed)
num_preprocessing_threads = 10 if not use_dali else 4
run_config_performance = tf.contrib.training.HParams(num_preprocessing_threads=num_preprocessing_threads,
use_tf_amp=use_tf_amp,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id)
run_config_additional = tf.contrib.training.HParams(
model_dir=model_dir,
log_dir=log_dir if hvd.rank() == 0 else None,
data_dir=data_dir,
data_idx_dir=data_idx_dir,
num_preprocessing_threads=num_preprocessing_threads)
self.run_hparams = Runner._build_hparams(
model_hparams, run_config_additional, run_config_performance)
model_name = architecture
architecture = resnet.model_architectures[architecture]
self._model = resnet.ResnetModel(model_name=model_name,
n_classes=model_hparams.n_classes,
layers_count=architecture["layers"],
layers_depth=architecture["widths"],
expansions=architecture["expansions"],
input_format=model_hparams.input_format,
compute_format=model_hparams.compute_format,
dtype=model_hparams.dtype,
weight_init=weight_init,
use_dali=use_dali,
use_cpu=use_cpu,
cardinality=architecture['cardinality'] if 'cardinality' in architecture else 1,
use_se=architecture['use_se'] if 'use_se' in architecture else False,
se_ratio=architecture['se_ratio'] if 'se_ratio' in architecture else 1)
self.training_logging_hook = None
self.eval_logging_hook = None
@staticmethod
def _build_hparams(*args):
hparams = tf.contrib.training.HParams()
for _hparams in args:
if not isinstance(_hparams, tf.contrib.training.HParams):
                raise ValueError(
                    "Invalid HParams argument object detected:", _hparams)
for key, val in _hparams.values().items():
try:
hparams.add_hparam(name=key, value=val)
except ValueError:
warnings.warn(
"the parameter `{}` already exists - existing value: {} and duplicated value: {}".format(
key, hparams.get(key), val))
return hparams
@staticmethod
def _get_global_batch_size(worker_batch_size):
return worker_batch_size * hvd.size()
@staticmethod
def _get_session_config(mode, use_xla, use_dali, use_cpu, gpu_memory_fraction, gpu_id=0):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
config = tf.ConfigProto()
if not use_cpu:
# Limit available GPU memory (tune the size)
if use_dali:
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = False
else:
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
config.gpu_options.visible_device_list = str(gpu_id)
config.gpu_options.force_gpu_compatible = True # Force pinned memory
if hvd.size() > 1:
config.gpu_options.visible_device_list = str(hvd.local_rank())
config.gpu_options.force_gpu_compatible = True # Force pinned memory
if use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
if mode == 'train':
if not use_cpu:
config.intra_op_parallelism_threads = 1 # Avoid pool of Eigen threads
config.inter_op_parallelism_threads = max(
2, (multiprocessing.cpu_count() // max(hvd.size(), 8) - 2))
return config
@staticmethod
def _get_run_config(mode, model_dir, use_xla, use_dali, use_cpu, gpu_memory_fraction, gpu_id=0, seed=None):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
config = tf.estimator.RunConfig(
model_dir=model_dir,
tf_random_seed=seed,
save_summary_steps=100 if mode in [
'train', 'validation'] else 1e9, # disabled in benchmark mode
save_checkpoints_steps=None,
save_checkpoints_secs=None,
session_config=Runner._get_session_config(mode=mode,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id),
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=1e6, # disabled
log_step_count_steps=1e9,
train_distribute=None,
device_fn=None,
protocol=None,
eval_distribute=None,
experimental_distribute=None)
if mode == 'train':
config = config.replace(save_checkpoints_steps=1000 if hvd.rank() == 0 else None,
keep_checkpoint_every_n_hours=3)
return config
def _get_estimator(self, mode, run_params, use_xla, use_dali, gpu_memory_fraction, gpu_id=0):
if mode not in ["train", 'validation', 'benchmark', 'inference']:
raise ValueError("Unknown mode received: %s (allowed: 'train', 'validation', 'benchmark', 'inference')" %
mode)
run_config = Runner._get_run_config(mode=mode,
model_dir=self.run_hparams.model_dir,
use_xla=use_xla,
use_dali=use_dali,
use_cpu=self.run_hparams.use_cpu,
gpu_memory_fraction=gpu_memory_fraction,
gpu_id=gpu_id,
seed=self.run_hparams.seed)
return tf.estimator.Estimator(model_fn=self._model,
model_dir=self.run_hparams.model_dir,
config=run_config,
params=run_params)
def train(self,
iter_unit,
num_iter,
run_iter,
batch_size,
warmup_steps=50,
weight_decay=1e-4,
lr_init=0.1,
lr_warmup_epochs=5,
momentum=0.9,
log_every_n_steps=1,
loss_scale=256,
label_smoothing=0.0,
mixup=0.0,
use_cosine_lr=False,
use_static_loss_scaling=False,
is_benchmark=False,
quantize=False,
symmetric=False,
quant_delay=0,
finetune_checkpoint=None,
use_final_conv=False,
use_qdq=False):
if iter_unit not in ["epoch", "batch"]:
raise ValueError(
'`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for training!')
if self.run_hparams.use_tf_amp or self.run_hparams.dtype == tf.float16:
if use_static_loss_scaling:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "0"
else:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1"
else:
# Make sure it hasn't been set to True on FP32 training
use_static_loss_scaling = False
num_gpus = hvd.size()
global_batch_size = batch_size * num_gpus
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs, num_decay_steps = runner_utils.parse_tfrecords_dataset(
data_dir=self.run_hparams.data_dir,
mode="train",
iter_unit=iter_unit,
num_iter=num_iter,
global_batch_size=global_batch_size,
)
steps_per_epoch = num_steps / num_epochs
else:
num_epochs = 1
num_steps = num_iter
steps_per_epoch = num_steps
num_decay_steps = num_steps
num_samples = num_steps * batch_size
if run_iter == -1:
run_iter = num_steps
else:
run_iter = steps_per_epoch * run_iter if iter_unit == "epoch" else run_iter
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
idx_filenames = runner_utils.parse_dali_idx_dataset(data_idx_dir=self.run_hparams.data_idx_dir,
mode="train")
training_hooks = []
if hvd.rank() == 0:
print('Starting Model Training...')
print("Training Epochs", num_epochs)
print("Total Steps", num_steps)
print("Steps per Epoch", steps_per_epoch)
print("Decay Steps", num_decay_steps)
print("Weight Decay Factor", weight_decay)
print("Init Learning Rate", lr_init)
print("Momentum", momentum)
print("Num GPUs", num_gpus)
print("Per-GPU Batch Size", batch_size)
if is_benchmark:
self.training_logging_hook = hooks.BenchmarkLoggingHook(
global_batch_size=global_batch_size, warmup_steps=warmup_steps, logging_steps=log_every_n_steps
)
else:
self.training_logging_hook = hooks.TrainingLoggingHook(
global_batch_size=global_batch_size,
num_steps=num_steps,
num_samples=num_samples,
num_epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
logging_steps=log_every_n_steps,
seed=self.run_hparams.seed,
)
training_hooks.append(self.training_logging_hook)
if hvd.size() > 1:
bcast_hook = hvd.hvd_global_object.BroadcastGlobalVariablesHook(0)
training_hooks.append(bcast_hook)
partition_hook = hooks.TrainingPartitionHook()
training_hooks.append(hooks.PrefillStagingAreasHook())
training_hooks.append(partition_hook)
estimator_params = {
'batch_size': batch_size,
'steps_per_epoch': steps_per_epoch,
'num_gpus': num_gpus,
'momentum': momentum,
'lr_init': lr_init,
'lr_warmup_epochs': lr_warmup_epochs,
'weight_decay': weight_decay,
'loss_scale': loss_scale,
'apply_loss_scaling': use_static_loss_scaling,
'label_smoothing': label_smoothing,
'mixup': mixup,
'num_decay_steps': num_decay_steps,
'use_cosine_lr': use_cosine_lr,
'use_final_conv': use_final_conv,
'quantize': quantize,
'use_qdq': use_qdq,
'symmetric': symmetric,
'quant_delay': quant_delay
}
if finetune_checkpoint:
estimator_params['finetune_checkpoint'] = finetune_checkpoint
image_classifier = self._get_estimator(mode='train',
run_params=estimator_params,
use_xla=self.run_hparams.use_xla,
use_dali=self.run_hparams.use_dali,
gpu_memory_fraction=self.run_hparams.gpu_memory_fraction,
gpu_id=self.run_hparams.gpu_id)
def training_data_fn():
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
if hvd.rank() == 0:
print("Using DALI input... ")
return data_utils.get_dali_input_fn(filenames=filenames,
idx_filenames=idx_filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=True,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
elif self.run_hparams.data_dir is not None:
return data_utils.get_tfrecords_input_fn(filenames=filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=True,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
else:
if hvd.rank() == 0:
print("Using Synthetic Data ...")
return data_utils.get_synth_input_fn(
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
num_channels=self.run_hparams.n_channels,
data_format=self.run_hparams.input_format,
num_classes=self.run_hparams.n_classes,
dtype=self.run_hparams.dtype,
)
try:
current_step = image_classifier.get_variable_value("global_step")
except ValueError:
current_step = 0
run_iter = max(0, min(run_iter, num_steps - current_step))
print("Current step:", current_step)
if run_iter > 0:
try:
image_classifier.train(
input_fn=training_data_fn,
steps=run_iter,
hooks=training_hooks,
)
except KeyboardInterrupt:
print("Keyboard interrupt")
if partition_hook.signal_recieved:
self.wait_after_eval = True
if hvd.rank() == 0:
if run_iter > 0:
print('Ending Model Training ...')
train_throughput = self.training_logging_hook.mean_throughput.value()
dllogger.log(
data={'train_throughput': train_throughput}, step=tuple())
else:
                print('Model already trained for the required number of steps. Skipping.')
def evaluate(
self,
iter_unit,
num_iter,
batch_size,
warmup_steps=50,
log_every_n_steps=1,
is_benchmark=False,
export_dir=None,
quantize=False,
symmetric=False,
use_qdq=False,
use_final_conv=False,
):
if iter_unit not in ["epoch", "batch"]:
raise ValueError(
'`iter_unit` value is unknown: %s (allowed: ["epoch", "batch"])' % iter_unit)
if self.run_hparams.data_dir is None and not is_benchmark:
raise ValueError('`data_dir` must be specified for evaluation!')
if hvd.rank() != 0:
raise RuntimeError('Multi-GPU inference is not supported')
estimator_params = {'quantize': quantize,
'symmetric': symmetric,
'use_qdq': use_qdq,
'use_final_conv': use_final_conv}
image_classifier = self._get_estimator(mode='validation',
run_params=estimator_params,
use_xla=self.run_hparams.use_xla,
use_dali=self.run_hparams.use_dali,
gpu_memory_fraction=self.run_hparams.gpu_memory_fraction,
gpu_id=self.run_hparams.gpu_id)
if self.run_hparams.data_dir is not None:
filenames, num_samples, num_steps, num_epochs, num_decay_steps = runner_utils.parse_tfrecords_dataset(
data_dir=self.run_hparams.data_dir,
mode="validation",
iter_unit=iter_unit,
num_iter=num_iter,
global_batch_size=batch_size,
)
else:
num_epochs = 1
num_decay_steps = -1
num_steps = num_iter
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
idx_filenames = runner_utils.parse_dali_idx_dataset(data_idx_dir=self.run_hparams.data_idx_dir,
mode="validation")
eval_hooks = []
if hvd.rank() == 0:
self.eval_logging_hook = hooks.BenchmarkLoggingHook(
global_batch_size=batch_size, warmup_steps=warmup_steps, logging_steps=log_every_n_steps
)
eval_hooks.append(self.eval_logging_hook)
print('Starting Model Evaluation...')
print("Evaluation Epochs", num_epochs)
print("Evaluation Steps", num_steps)
print("Decay Steps", num_decay_steps)
print("Global Batch Size", batch_size)
def evaluation_data_fn():
if self.run_hparams.use_dali and self.run_hparams.data_idx_dir is not None:
if hvd.rank() == 0:
print("Using DALI input... ")
return data_utils.get_dali_input_fn(filenames=filenames,
idx_filenames=idx_filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=False,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
elif self.run_hparams.data_dir is not None:
return data_utils.get_tfrecords_input_fn(filenames=filenames,
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
training=False,
distort_color=self.run_hparams.distort_colors,
num_threads=self.run_hparams.num_preprocessing_threads,
deterministic=False if self.run_hparams.seed is None else True)
else:
print("Using Synthetic Data ...\n")
return data_utils.get_synth_input_fn(
batch_size=batch_size,
height=self.run_hparams.height,
width=self.run_hparams.width,
num_channels=self.run_hparams.n_channels,
data_format=self.run_hparams.input_format,
num_classes=self.run_hparams.n_classes,
dtype=self.run_hparams.dtype,
)
try:
eval_results = image_classifier.evaluate(
input_fn=evaluation_data_fn,
steps=num_steps,
hooks=eval_hooks,
)
eval_throughput = self.eval_logging_hook.mean_throughput.value()
if len(self.eval_logging_hook.latencies) > 0:
eval_latencies = np.array(
self.eval_logging_hook.latencies) * 1000
eval_latencies_q = np.quantile(
eval_latencies, q=[0.9, 0.95, 0.99])
eval_latencies_mean = np.mean(eval_latencies)
additional_metrics = {
'eval_latency_avg': eval_latencies_mean,
'eval_latency_p90': eval_latencies_q[0],
'eval_latency_p95': eval_latencies_q[1],
'eval_latency_p99': eval_latencies_q[2],
}
else:
additional_metrics = {}
dllogger.log(data={
'top1_accuracy': float(eval_results['top1_accuracy']),
'top5_accuracy': float(eval_results['top5_accuracy']),
'eval_throughput': eval_throughput,
**additional_metrics
},
step=tuple())
if export_dir is not None:
dllogger.log(data={'export_dir': export_dir}, step=tuple())
input_receiver_fn = data_utils.get_serving_input_receiver_fn(batch_size=None,
height=self.run_hparams.height,
width=self.run_hparams.width,
num_channels=self.run_hparams.n_channels,
data_format=self.run_hparams.input_format,
dtype=self.run_hparams.dtype)
self.exported_path = image_classifier.export_savedmodel(
export_dir, input_receiver_fn)
except KeyboardInterrupt:
print("Keyboard interrupt")
print('Model evaluation finished')
        if getattr(self, "wait_after_eval", False):
time.sleep(3600)
def predict(self, to_predict, quantize=False, symmetric=False, use_qdq=False, use_final_conv=False):
estimator_params = {
'quantize': quantize,
'symmetric': symmetric,
'use_qdq': use_qdq,
'use_final_conv': use_final_conv
}
if to_predict is not None:
filenames = runner_utils.parse_inference_input(to_predict)
image_classifier = self._get_estimator(mode='inference',
run_params=estimator_params,
use_xla=self.run_hparams.use_xla,
use_dali=self.run_hparams.use_dali,
gpu_memory_fraction=self.run_hparams.gpu_memory_fraction)
inference_hooks = []
def inference_data_fn():
return data_utils.get_inference_input_fn(filenames=filenames,
height=self.run_hparams.height,
width=self.run_hparams.width,
num_threads=self.run_hparams.num_preprocessing_threads)
try:
inference_results = image_classifier.predict(input_fn=inference_data_fn,
predict_keys=None,
hooks=inference_hooks,
yield_single_examples=True)
for result in inference_results:
print(result['classes'], str(
result['probabilities'][result['classes']]))
except KeyboardInterrupt:
print("Keyboard interrupt")
print('Ending Inference ...')
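# Illustrative sketch (not part of the original file): a minimal way to drive
# the Runner defined above. All values below are placeholders chosen from the
# constructor and train()/evaluate() signatures; is_benchmark=True uses
# synthetic data so no data_dir is required.
def _example_runner_usage():
    runner = Runner(n_classes=1001,
                    architecture='resnet50',
                    dtype=tf.float32,
                    model_dir='/tmp/resnet50_ckpt',  # placeholder path
                    data_dir=None)
    runner.train(iter_unit='batch', num_iter=500, run_iter=-1,
                 batch_size=64, is_benchmark=True)
    runner.evaluate(iter_unit='batch', num_iter=100,
                    batch_size=64, is_benchmark=True)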
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | box_predictor_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""
import mock
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2
class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
min_depth: 2
max_depth: 16
num_layers_before_predictor: 2
use_dropout: false
dropout_keep_probability: 0.4
kernel_size: 3
box_code_size: 3
apply_sigmoid_to_scores: true
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 2)
self.assertEqual(box_predictor._max_depth, 16)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4)
self.assertTrue(class_head._apply_sigmoid_to_scores)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(class_head._num_class_slots, 10)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertTrue(class_head._use_depthwise)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._min_depth, 0)
self.assertEqual(box_predictor._max_depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertFalse(class_head._apply_sigmoid_to_scores)
self.assertEqual(class_head._num_class_slots, 91)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertFalse(class_head._use_depthwise)
class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_conv_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
(box_predictor_proto.weight_shared_convolutional_box_predictor
.conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_construct_non_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_non_default_depthwise_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
depth: 2
num_layers_before_predictor: 2
kernel_size: 7
box_code_size: 3
class_prediction_bias_init: 4.0
use_depthwise: true
}
"""
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
(box_predictor_proto.weight_shared_convolutional_box_predictor.
conv_hyperparams.CopyFrom(hyperparams_proto))
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10,
add_background_class=False)
class_head = box_predictor._class_prediction_head
self.assertEqual(box_predictor._depth, 2)
self.assertEqual(box_predictor._num_layers_before_predictor, 2)
self.assertEqual(box_predictor._apply_batch_norm, False)
self.assertEqual(box_predictor._use_depthwise, True)
self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
self.assertEqual(box_predictor.num_classes, 10)
self.assertFalse(box_predictor._is_training)
def test_construct_default_conv_box_predictor(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, False)
def test_construct_default_conv_box_predictor_with_batch_norm(self):
box_predictor_text_proto = """
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l1_regularizer {
}
}
batch_norm {
train: true
}
initializer {
truncated_normal_initializer {
}
}
}
}"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=hyperparams_builder.build,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor._depth, 0)
self.assertEqual(box_predictor._num_layers_before_predictor, 0)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._apply_batch_norm, True)
class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_builder_calls_fc_argscope_fn(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
op: FC
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
mock_argscope_fn.assert_called_with(hyperparams_proto, False)
self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn,
'arg_scope')
self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn,
'arg_scope')
def test_non_default_mask_rcnn_box_predictor(self):
fc_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
op: FC
"""
box_predictor_text_proto = """
mask_rcnn_box_predictor {
use_dropout: true
dropout_keep_probability: 0.8
box_code_size: 3
share_box_across_classes: true
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
return (fc_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_fc_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertTrue(box_head._use_dropout)
self.assertTrue(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 3)
self.assertEqual(box_head._share_box_across_classes, True)
def test_build_default_mask_rcnn_box_predictor(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor = box_predictor_builder.build(
argscope_fn=mock.Mock(return_value='arg_scope'),
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0)
def test_build_box_predictor_with_mask_branch(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertTrue(
mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
  def test_build_box_predictor_with_convolve_then_upsample_masks(self):
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
hyperparams_pb2.Hyperparams.FC)
box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
hyperparams_pb2.Hyperparams.CONV)
box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24
box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24
box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = (
True)
mock_argscope_fn = mock.Mock(return_value='arg_scope')
box_predictor = box_predictor_builder.build(
argscope_fn=mock_argscope_fn,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
mock_argscope_fn.assert_has_calls(
[mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
True),
mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
True)], any_order=True)
box_head = box_predictor._box_prediction_head
class_head = box_predictor._class_prediction_head
third_stage_heads = box_predictor._third_stage_heads
self.assertFalse(box_head._use_dropout)
self.assertFalse(class_head._use_dropout)
self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_head._box_code_size, 4)
self.assertTrue(
mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
self.assertEqual(
third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._mask_prediction_conv_depth, 512)
self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
._convolve_then_upsample)
class RfcnBoxPredictorBuilderTest(tf.test.TestCase):
def test_box_predictor_calls_fc_argscope_fn(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.0003
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.3
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=False,
num_classes=10)
(conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
self.assertAlmostEqual((hyperparams_proto.regularizer.
l1_regularizer.weight),
(conv_hyperparams_actual.regularizer.l1_regularizer.
weight))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.stddev),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.stddev))
self.assertAlmostEqual((hyperparams_proto.initializer.
truncated_normal_initializer.mean),
(conv_hyperparams_actual.initializer.
truncated_normal_initializer.mean))
self.assertEqual(hyperparams_proto.activation,
conv_hyperparams_actual.activation)
self.assertFalse(is_training)
def test_non_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
box_predictor_text_proto = """
rfcn_box_predictor {
num_spatial_bins_height: 4
num_spatial_bins_width: 4
depth: 4
box_code_size: 3
crop_height: 16
crop_width: 16
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 3)
self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
self.assertEqual(box_predictor._crop_size, [16, 16])
def test_default_rfcn_box_predictor(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)
def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
return (conv_hyperparams_arg, is_training)
box_predictor_proto = box_predictor_pb2.BoxPredictor()
box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
hyperparams_proto)
box_predictor = box_predictor_builder.build(
argscope_fn=mock_conv_argscope_builder,
box_predictor_config=box_predictor_proto,
is_training=True,
num_classes=90)
self.assertEqual(box_predictor.num_classes, 90)
self.assertTrue(box_predictor._is_training)
self.assertEqual(box_predictor._box_code_size, 4)
self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
self.assertEqual(box_predictor._crop_size, [12, 12])
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Translation/GNMT/utils | utils | evaluation_utils | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import checkpoint_management as cm
def get_all_checkpoints(output_dir):
"""docstring."""
ckpt = cm.get_checkpoint_state(output_dir, None)
res = []
if not ckpt:
return None
for path in ckpt.all_model_checkpoint_paths:
# Look for either a V2 path or a V1 path, with priority for V2.
v2_path = cm._prefix_to_checkpoint_path(path, saver_pb2.SaverDef.V2)
v1_path = cm._prefix_to_checkpoint_path(path, saver_pb2.SaverDef.V1)
if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
v1_path):
res.append(path)
else:
tf.logging.error("Couldn't match files for checkpoint %s", path)
return res
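# Illustrative sketch (not part of the original file): get_all_checkpoints
# returns every checkpoint prefix recorded in the directory's CheckpointState
# file, which is useful for evaluating each saved checkpoint in turn. The
# directory path is a placeholder.
def _example_list_checkpoints(output_dir='/tmp/gnmt_train'):
    checkpoints = get_all_checkpoints(output_dir)
    for path in checkpoints or []:
        tf.logging.info('Found checkpoint: %s', path)
    return checkpoints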
|
DGLPyTorch/DrugDiscovery/SE3Transformer/tests | tests | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import dgl
import torch
def get_random_graph(N, num_edges_factor=18):
graph = dgl.remove_self_loop(dgl.rand_graph(N, N * num_edges_factor))
return graph
def assign_relative_pos(graph, coords):
src, dst = graph.edges()
graph.edata['rel_pos'] = coords[src] - coords[dst]
return graph
def get_max_diff(a, b):
return (a - b).abs().max().item()
def rot_z(gamma):
return torch.tensor([
[torch.cos(gamma), -torch.sin(gamma), 0],
[torch.sin(gamma), torch.cos(gamma), 0],
[0, 0, 1]
], dtype=gamma.dtype)
def rot_y(beta):
return torch.tensor([
[torch.cos(beta), 0, torch.sin(beta)],
[0, 1, 0],
[-torch.sin(beta), 0, torch.cos(beta)]
], dtype=beta.dtype)
def rot(alpha, beta, gamma):
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
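# Illustrative sketch (not part of the original file): how the helpers above
# are typically combined in an equivariance test. The graph size and rotation
# angles are arbitrary placeholders.
def _example_rotation_check(N=16):
    # rot() composes rotations about z, y, z; the result is orthonormal.
    R = rot(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
    assert torch.allclose(R @ R.T, torch.eye(3, dtype=R.dtype), atol=1e-6)
    # Relative positions transform the same way as the node coordinates.
    graph = get_random_graph(N)
    coords = torch.randn(N, 3)
    rel_pos = assign_relative_pos(graph, coords).edata['rel_pos']
    rel_pos_rot = assign_relative_pos(graph, coords @ R.T).edata['rel_pos']
    assert get_max_diff(rel_pos @ R.T, rel_pos_rot) < 1e-5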
|
PyTorch/Translation/Transformer/fairseq/data | data | data_utils | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import itertools
import os
import numpy as np
import torch
import fairseq.data.batch_C
import sys
from .dictionary import Dictionary
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
for filename in os.listdir(path):
parts = filename.split('.')
if len(parts) >= 3 and len(parts[1].split('-')) == 2:
return parts[1].split('-')
return src, dst
def load_dictionaries(args):
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = infer_language_pair(args.data)
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
args.src_vocab_size = len(src_dict)
args.tgt_vocab_size = len(tgt_dict)
args.padding_idx = src_dict.pad()
print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return src_dict, tgt_dict
class ShardedIterator(object):
"""A sharded wrapper around an iterable (padded to length)."""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
            raise ValueError('shard_id must be between 0 and num_shards - 1')
self._sharded_len = len(iterable) // num_shards
if len(iterable) % num_shards > 0:
self._sharded_len += 1
self.itr = itertools.zip_longest(
range(self._sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
)
def __len__(self):
return self._sharded_len
def __iter__(self):
return self
def __next__(self):
return next(self.itr)[1]
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count."""
def __init__(self, iterable):
self.iterable = iterable
self.count = 0
self.itr = iter(self)
def __len__(self):
return len(self.iterable)
def __iter__(self):
for x in self.iterable:
self.count += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
return self.count < len(self)
def skip(self, num_to_skip):
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
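# Minimal usage sketch (illustration only, not used by the training code):
# ShardedIterator splits an iterable across data-parallel ranks, and
# CountingIterator tracks how many items were consumed, which is what makes
# mid-epoch resume via skip() possible.
def _example_sharded_counting_iterators():
    data = list(range(10))
    shard0 = ShardedIterator(data, num_shards=2, shard_id=0, fill_value=-1)
    itr = CountingIterator(shard0)
    consumed = [next(itr) for _ in range(3)]    # [0, 2, 4]
    return consumed, itr.count, itr.has_next()  # ([0, 2, 4], 3, True)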
def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, pad_sequence=1):
"""Convert a list of 1d tensors into a padded 2d tensor."""
#size = max(v.size(0) for v in values)
orig_size = max(v.size(0) for v in values)
size = 0
if pad_sequence > 1:
size = orig_size // pad_sequence * pad_sequence
if orig_size % pad_sequence > 0:
size += pad_sequence
else:
size = orig_size
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
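# Minimal usage sketch (values are illustrative): right-pad two sequences to a
# common length with pad_idx=1 and eos_idx=2; the shifted variant is what
# collate() feeds to the decoder as prev_output_tokens.
def _example_collate_tokens():
    values = [torch.LongTensor([4, 5, 6, 2]), torch.LongTensor([7, 8, 2])]
    batch = collate_tokens(values, pad_idx=1, eos_idx=2, left_pad=False)
    # batch -> tensor([[4, 5, 6, 2],
    #                  [7, 8, 2, 1]])
    shifted = collate_tokens(values, pad_idx=1, eos_idx=2, left_pad=False,
                             move_eos_to_beginning=True)
    # shifted -> tensor([[2, 4, 5, 6],
    #                    [2, 7, 8, 1]])
    return batch, shifted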
def collate(samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, pad_sequence=1):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False):
return collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
pad_sequence,
)
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge('source', left_pad=left_pad_source)
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get('target', None) is not None:
target = merge('target', left_pad=left_pad_target)
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
'target',
left_pad=left_pad_target,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
target = target.index_select(0, sort_order)
ntokens = sum(len(s['target']) for s in samples)
else:
ntokens = sum(len(s['source']) for s in samples)
return {
'id': id,
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'prev_output_tokens': prev_output_tokens,
},
'target': target,
}
def get_dummy_batch(num_tokens, src_dict, tgt_dict, src_len=128, tgt_len=128,
left_pad_source=True, left_pad_target=False, pad_sequence=1):
bsz = num_tokens // max(src_len, tgt_len)
dummy_samples = [
{
'id': i,
'source': src_dict.dummy_sentence(src_len),
'target': tgt_dict.dummy_sentence(tgt_len) if tgt_dict is not None else None,
}
for i in range(bsz)
]
return collate(
dummy_samples, pad_idx=src_dict.pad(), eos_idx=src_dict.eos(),
left_pad_source=left_pad_source, left_pad_target=left_pad_target,
pad_sequence=pad_sequence,
)
class EpochBatchIterator(object):
"""Iterate over a FairseqDataset and yield batches bucketed by size.
Batches may contain sequences of different lengths. This iterator can be
reused across multiple epochs with the next_epoch_itr() method.
Args:
dataset: a FairseqDataset
max_tokens: max number of tokens in each batch
max_sentences: max number of sentences in each batch
max_positions: max sentence length supported by the model
required_batch_size_multiple: require batch size to be a multiple of N
seed: seed for random number generator for reproducibility
num_shards: shard the data iterator into N shards
shard_id: which shard of the data iterator to return
"""
def __init__(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
required_batch_size_multiple=1, seed=1,
num_shards=1, shard_id=0, epoch=0
):
self.dataset = dataset
self.max_tokens = max_tokens if max_tokens is not None else float('Inf')
self.max_sentences = max_sentences if max_sentences is not None else float('Inf')
self.max_positions = max_positions
self.bsz_mult = required_batch_size_multiple
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.epoch = epoch
self._cur_epoch_itr = None
self._next_epoch_itr = None
with numpy_seed(self.seed):
indices = self.dataset.ordered_indices(self.seed, self.epoch)
        # need integer values rather than float('Inf')

max_sentences = max_sentences if max_sentences is not None else sys.maxsize
max_positions_num = 1024
max_tokens = max_tokens if max_tokens is not None else sys.maxsize
        # The following line is a workaround for the fact that we cannot pass None as an argument
tgt_sizes = self.dataset.tgt_sizes if self.dataset.tgt_sizes is not None else self.dataset.src_sizes
batches = fairseq.data.batch_C.make_batches(
self.dataset.src_sizes, tgt_sizes, indices, max_tokens,
max_sentences, self.bsz_mult, max_positions_num)
self.frozen_batches = tuple(batches)
def __len__(self):
return len(self.frozen_batches)
def next_epoch_itr(self, shuffle=True):
"""Shuffle batches and return a new iterator over the dataset."""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)
return self._cur_epoch_itr
def end_of_epoch(self):
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.count
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.count
return 0
def state_dict(self):
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
if itr_pos > 0:
# fast-forward epoch iterator
itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
if itr_pos < len(itr):
self._next_epoch_itr = itr.skip(itr_pos)
def _get_iterator_for_epoch(self, epoch, shuffle):
if shuffle:
# set seed based on the seed and epoch number so that we get
# reproducible results when resuming from checkpoints
with numpy_seed(self.seed + epoch):
batches = list(self.frozen_batches) # copy
np.random.shuffle(batches)
else:
batches = self.frozen_batches
return CountingIterator(torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.dataset.collater,
num_workers=1,
batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),
))
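# Minimal usage sketch (dataset construction and train_step are omitted; see the
# training script for the real wiring): batches are pre-computed once in the
# constructor, and next_epoch_itr() hands out a fresh, optionally reshuffled
# iterator for every epoch.
#
#   epoch_itr = EpochBatchIterator(dataset, max_tokens=4096, seed=1,
#                                  num_shards=world_size, shard_id=rank)
#   while not done_training:
#       for sample in epoch_itr.next_epoch_itr(shuffle=True):
#           train_step(sample)
#       state = epoch_itr.state_dict()   # save for mid-epoch resume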
@contextlib.contextmanager
def numpy_seed(seed):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
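# Minimal usage sketch: draws inside the context are reproducible for a given
# seed, while the NumPy RNG state outside the context is left untouched.
def _example_numpy_seed():
    with numpy_seed(1234):
        a = np.random.randint(0, 100, size=3)
    with numpy_seed(1234):
        b = np.random.randint(0, 100, size=3)
    assert (a == b).all()
    return a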
|
TensorFlow/LanguageModeling/BERT/biobert/scripts | scripts | run_biobert_finetuning_inference | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
task=${1:-"ner_bc5cdr-chem"}
init_checkpoint=${2:-"/results/biobert_tf_uncased_base/model.ckpt"}
bert_model=${3:-"base"}
cased=${4:-"false"}
precision=${5:-"fp16"}
use_xla=${6:-"true"}
batch_size=${7:-"16"}
if [ "$cased" = "true" ] ; then
DO_LOWER_CASE=0
CASING_DIR_PREFIX="cased"
case_flag="--do_lower_case=False"
else
DO_LOWER_CASE=1
CASING_DIR_PREFIX="uncased"
case_flag="--do_lower_case=True"
fi
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-24_H-1024_A-16
else
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-12_H-768_A-12
fi
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--amp"
else
echo "fp32/tf32 activated!"
use_fp16="--noamp"
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--use_xla"
echo "XLA activated"
else
use_xla_tag="--nouse_xla"
fi
DATESTAMP=`date +'%y%m%d%H%M%S'`
if [ "$task" = "ner_bc5cdr-chem" ] ; then
printf -v TAG "tf_bert_biobert_ner_bc5cdr_chem_inference_%s_%s" "$bert_model" "$precision"
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/chem
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
python /workspace/bert/run_ner.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--data_dir=$DATASET_DIR \
--output_dir=$OUTPUT_DIR \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=128 \
$use_fp16 $use_xla_tag $case_flag
elif [ "$task" = "ner_bc5cdr-disease" ] ; then
printf -v TAG "tf_bert_biobert_ner_bc5cdr_disease_inference_%s_%s" "$bert_model" "$precision"
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/disease
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
python3 /workspace/bert/run_ner.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--data_dir=$DATASET_DIR \
--output_dir=$OUTPUT_DIR \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=128 \
"$use_fp16" $use_xla_tag $case_flag
elif [ "$task" = "rel_chemprot" ] ; then
printf -v TAG "tf_bert_biobert_rel_chemprot_inference_%s_%s_" "$bert_model" "$precision"
DATASET_DIR=/workspace/bert/data/biobert/chemprot-data_treeLSTM
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
python3 /workspace/bert/run_re.py \
--do_prepare=true \
--do_eval=true \
--do_predict=true \
--task_name="chemprot" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--data_dir=$DATASET_DIR \
--output_dir=$OUTPUT_DIR \
--eval_batch_size=$batch_size \
--predict_batch_size=$batch_size \
--max_seq_length=512 \
"$use_fp16" $use_xla_tag $case_flag
python3 /workspace/bert/biobert/re_eval.py --task=chemprot --output_path=$OUTPUT_DIR/test_results.tsv \
--answer_path=$DATASET_DIR/test.tsv |& tee $OUTPUT_DIR/test_results.txt
else
echo "Benchmarking for " $task "currently not supported. Sorry!"
fi |
PaddlePaddle/LanguageModeling/BERT/utils | utils | collate | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Stack:
"""
Stacks the input data samples to construct the batch. The input samples
must have the same shape/length.
Args:
axis (int, optional): The axis in the result data along which the input
data are stacked. Default: 0.
dtype (str|numpy.dtype, optional): The value type of the output. If it
is set to None, the type of input data is used. Default: None.
"""
def __init__(self, axis=0, dtype=None):
self._axis = axis
self._dtype = dtype
def __call__(self, data):
"""
Batchifies the input data by stacking.
Args:
data (list[numpy.ndarray]): The input data samples. It is a list.
Each element is a numpy.ndarray or list.
Returns:
numpy.ndarray: Stacked batch data.
Example:
.. code-block:: python
from data import Stack
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
result = Stack()([a, b, c])
'''
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
'''
"""
data = np.stack(
data,
axis=self._axis).astype(self._dtype) if self._dtype else np.stack(
data, axis=self._axis)
return data
class Pad:
"""
Stacks the input data samples with padding.
Args:
pad_val (float|int, optional): The padding value. Default: 0.
axis (int, optional): The axis to pad the arrays. The arrays will be
padded to the largest dimension at axis. For example,
assume the input arrays have shape (10, 8, 5), (6, 8, 5), (3, 8, 5)
and the axis is 0. Each input will be padded into
(10, 8, 5) and then stacked to form the final output, which has
shape(3, 10, 8, 5). Default: 0.
ret_length (bool|numpy.dtype, optional): If it is bool, indicate whether
to return the valid length in the output, and the data type of
returned length is int32 if True. If it is numpy.dtype, indicate the
data type of returned length. Default: False.
dtype (numpy.dtype, optional): The value type of the output. If it is
set to None, the input data type is used. Default: None.
pad_right (bool, optional): Boolean argument indicating whether the
padding direction is right-side. If True, it indicates we pad to the right side,
while False indicates we pad to the left side. Default: True.
Example:
.. code-block:: python
from data import Pad
# Inputs are multiple lists
a = [1, 2, 3, 4]
b = [5, 6, 7]
c = [8, 9]
Pad(pad_val=0)([a, b, c])
'''
[[1, 2, 3, 4],
[5, 6, 7, 0],
[8, 9, 0, 0]]
'''
"""
def __init__(self,
pad_val=0,
axis=0,
ret_length=None,
dtype=None,
pad_right=True):
self._pad_val = pad_val
self._axis = axis
self._ret_length = ret_length
self._dtype = dtype
self._pad_right = pad_right
def __call__(self, data):
"""
Batchify the input data by padding. The input can be list of numpy.ndarray.
The arrays will be padded to the largest dimension at axis and then
stacked to form the final output. In addition, the function will output
the original dimensions at the axis if ret_length is not None.
Args:
data (list(numpy.ndarray)|list(list)): List of samples to pad and stack.
Returns:
numpy.ndarray|tuple: If `ret_length` is False, it is a numpy.ndarray \
representing the padded batch data and the shape is (N, …). \
Otherwise, it is a tuple, except for the padded batch data, the \
tuple also includes a numpy.ndarray representing all samples' \
original length shaped `(N,)`.
"""
arrs = [np.asarray(ele) for ele in data]
original_length = [ele.shape[self._axis] for ele in arrs]
max_size = max(original_length)
ret_shape = list(arrs[0].shape)
ret_shape[self._axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = np.full(
shape=ret_shape,
fill_value=self._pad_val,
dtype=arrs[0].dtype if self._dtype is None else self._dtype)
for i, arr in enumerate(arrs):
if arr.shape[self._axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
if self._pad_right:
slices[self._axis] = slice(0, arr.shape[self._axis])
else:
slices[self._axis] = slice(
max_size - arr.shape[self._axis], max_size)
if slices[self._axis].start != slices[self._axis].stop:
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
if self._ret_length:
return ret, np.asarray(
original_length,
dtype="int32") if self._ret_length == True else np.asarray(
original_length, self._ret_length)
else:
return ret
class Tuple:
"""
Wrap multiple batchify functions together. The input functions will be applied
to the corresponding input fields.
Each sample should be a list or tuple containing multiple fields. The i'th
batchify function stored in Tuple will be applied on the i'th field.
For example, when data sample is (nd_data, label), you can wrap two batchify
functions using `Tuple(DataBatchify, LabelBatchify)` to batchify nd_data and
label correspondingly.
Args:
fn (list|tuple|callable): The batchify functions to wrap.
*args (tuple of callable): The additional batchify functions to wrap.
Example:
.. code-block:: python
from data import Tuple, Pad, Stack
batchify_fn = Tuple(Pad(axis=0, pad_val=0), Stack())
"""
def __init__(self, fn, *args):
if isinstance(fn, (list, tuple)):
assert len(args) == 0, f"Input pattern not understood. The input of Tuple can be " \
f"Tuple(A, B, C) or Tuple([A, B, C]) or Tuple((A, B, C)). " \
f"Received fn={fn}, args={args}"
self._fn = fn
else:
self._fn = (fn, ) + args
for i, ele_fn in enumerate(self._fn):
assert callable(
ele_fn
), f"Batchify functions must be callable! type(fn[{i}]) = {str(type(ele_fn))}"
def __call__(self, data):
"""
Batchify data samples by applying each function on the corresponding data
field, and each data field is produced by stacking the field data of samples.
Args:
data (list): The samples to batchfy. Each sample should contain N fields.
Returns:
tuple: A tuple composed of results from all including batchifying functions.
"""
assert len(data[0]) == len(self._fn), \
f"The number of attributes in each data sample should contain" \
f" {len(self._fn)} elements"
ret = []
for i, ele_fn in enumerate(self._fn):
result = ele_fn([ele[i] for ele in data])
if isinstance(result, (tuple, list)):
ret.extend(result)
else:
ret.append(result)
return tuple(ret)
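# Minimal usage sketch (values are illustrative): pad the variable-length token
# ids, stack the fixed-size labels, and get back one array per field.
def _example_batchify():
    batchify_fn = Tuple(Pad(axis=0, pad_val=0), Stack(dtype="int64"))
    samples = [([1, 2, 3], 0), ([4, 5], 1)]
    token_ids, labels = batchify_fn(samples)
    # token_ids -> [[1, 2, 3], [4, 5, 0]]; labels -> [0, 1]
    return token_ids, labels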
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2PrenetPlugin | taco2PrenetPlugin | CMakeLists | #
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
file(GLOB SRCS *.cpp *.cu)
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} ${SRCS})
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} PARENT_SCOPE)
|
PyTorch/SpeechRecognition/QuartzNet/common/text/unidecoder | unidecoder | homoglyphs | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
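# Minimal usage sketch (not part of the original table): invert the mapping so
# that non-ASCII confusables can be replaced by their ASCII counterparts.
def _example_replace_homoglyphs(text):
    reverse = {glyph: ascii_char
               for ascii_char, glyphs in homoglyphs.items()
               for glyph in glyphs if ord(glyph) > 127}
    return ''.join(reverse.get(ch, ch) for ch in text)
# _example_replace_homoglyphs('Воss') -> 'Boss' (Cyrillic В and о mapped back)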
|
CUDA-Optimized/FastSpeech/fastspeech | fastspeech | perf_infer_ljspeech | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import sys
import time
import fire
import torch
from tqdm import tqdm
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy, to_device_async
from fastspeech.infer import get_inferencer
from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer
from contextlib import ExitStack
import numpy as np
try:
from apex import amp
except ImportError:
    raise ImportError('apex is required to run this script. Please install apex.')
pp = pprint.PrettyPrinter(indent=4, width=1000)
WARMUP_ITERS = 3
def perf_inference(hparam="infer.yaml",
with_vocoder=False,
n_iters=None,
device=DEFAULT_DEVICE,
**kwargs):
"""The script for estimating inference performance.
    By default, this script loads parameters from the default config file, fastspeech/hparams/infer.yaml.
    Besides the flags, you can also override parameters of the config file via the command line. For example,
--dataset_path=DATASET_PATH
Path to dataset directory.
--checkpoint_path=CHECKPOINT_PATH
Path to checkpoint directory. The latest checkpoint will be loaded.
--batch_size=BATCH_SIZE
Batch size to use. Defaults to 1.
Refer to fastspeech/hparams/infer.yaml to see more parameters.
Args:
hparam (str, optional): Path to default config file. Defaults to "infer.yaml".
with_vocoder (bool, optional): Whether or not to estimate with a vocoder. Defaults to False.
n_iters (int, optional): Number of batches to estimate. Defaults to None (an epoch).
        device (str, optional): Device to use. Defaults to "cuda" if available, or "cpu".
"""
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
dataset = LJSpeechDataset(root_path=hp.dataset_path,
sr=hp.sr,
n_fft=hp.n_fft,
win_len=hp.win_len,
hop_len=hp.hop_len,
n_mels=hp.num_mels,
mel_fmin=hp.mel_fmin,
mel_fmax=hp.mel_fmax,
exclude_mels=True,
sort_by_length=True if hp.use_trt and hp.trt_multi_engine else False
)
tprint("Dataset size: {}".format(len(dataset)))
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
shuffle=False if hp.use_trt and hp.trt_multi_engine else True,
drop_last=True,
)
fs_inferencer = get_inferencer(model, data_loader, device)
if with_vocoder:
if hp.use_trt:
from fastspeech.trt.waveglow_trt_inferencer import WaveGlowTRTInferencer
wb_inferencer = WaveGlowTRTInferencer(ckpt_file=hp.waveglow_path, engine_file=hp.waveglow_engine_path, use_fp16=hp.use_fp16)
else:
wb_inferencer = WaveGlowInferencer(ckpt_file=hp.waveglow_path, device=device, use_fp16=hp.use_fp16)
with fs_inferencer, wb_inferencer if with_vocoder else ExitStack():
tprint("Perf started. Batch size={}.".format(hp.batch_size))
latencies = []
throughputs = []
n_iters = min(n_iters, len(data_loader)) if n_iters else len(data_loader)
assert(n_iters > WARMUP_ITERS)
for i in tqdm(range(n_iters)):
start = time.time()
outputs = fs_inferencer.infer()
mels = outputs['mel']
mel_masks = outputs['mel_mask']
assert(mels.is_cuda)
if with_vocoder:
# remove padding
max_len = mel_masks.sum(axis=1).max()
mels = mels[..., :max_len]
mel_masks = mel_masks[..., :max_len]
with torch.no_grad():
wavs = wb_inferencer.infer(mels)
wavs = to_cpu_numpy(wavs)
else:
# include time for DtoH copy
to_cpu_numpy(mels)
to_cpu_numpy(mel_masks)
end = time.time()
if i > WARMUP_ITERS-1:
time_elapsed = end - start
generated_samples = len(mel_masks.nonzero()) * hp.hop_len
throughput = generated_samples / time_elapsed
latencies.append(time_elapsed)
throughputs.append(throughput)
latencies.sort()
avg_latency = np.mean(latencies)
std_latency = np.std(latencies)
latency_90 = max(latencies[:int(len(latencies)*0.90)]) if n_iters > 1 else 0
latency_95 = max(latencies[:int(len(latencies)*0.95)]) if n_iters > 1 else 0
latency_99 = max(latencies[:int(len(latencies)*0.99)]) if n_iters > 1 else 0
throughput = np.mean(throughputs)
rtf = throughput / (hp.sr * hp.batch_size)
tprint("Batch size\tPrecision\tAvg Latency(s)\tStd Latency(s)\tLatency 90%(s)\tLatency 95%(s)\tLatency 99%(s)\tThroughput(samples/s)\tAvg RTF\n\
{}\t{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{:.2f}".format(
hp.batch_size,
"FP16" if hp.use_fp16 else "FP32",
avg_latency,
std_latency,
latency_90,
latency_95,
latency_99,
int(throughput),
rtf))
if __name__ == '__main__':
fire.Fire(perf_inference)
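# Example invocations (paths are illustrative; unspecified parameters come from
# fastspeech/hparams/infer.yaml):
#   python fastspeech/perf_infer_ljspeech.py --dataset_path=/data/LJSpeech-1.1 \
#       --checkpoint_path=/ckpt --batch_size=1
#   python fastspeech/perf_infer_ljspeech.py --with_vocoder=True --n_iters=64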
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ProjectionPlugin | taco2ProjectionPlugin | CMakeLists | #
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
file(GLOB SRCS *.cpp *.cu)
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} ${SRCS})
set(PLUGIN_SOURCES ${PLUGIN_SOURCES} PARENT_SCOPE)
|
PyTorch/Detection/SSD/ssd | ssd | evaluate | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import time
import numpy as np
from contextlib import redirect_stdout
import io
from pycocotools.cocoeval import COCOeval
def evaluate(model, coco, cocoGt, encoder, inv_map, args):
if args.distributed:
N_gpu = torch.distributed.get_world_size()
else:
N_gpu = 1
model.eval()
if not args.no_cuda:
model.cuda()
ret = []
start = time.time()
# for idx, image_id in enumerate(coco.img_keys):
for nbatch, (img, img_id, img_size, _, _) in enumerate(coco):
print("Parsing batch: {}/{}".format(nbatch, len(coco)), end='\r')
with torch.no_grad():
inp = img.cuda()
with torch.cuda.amp.autocast(enabled=args.amp):
# Get predictions
ploc, plabel = model(inp)
ploc, plabel = ploc.float(), plabel.float()
# Handle the batch of predictions produced
# This is slow, but consistent with old implementation.
for idx in range(ploc.shape[0]):
# ease-of-use for specific predictions
ploc_i = ploc[idx, :, :].unsqueeze(0)
plabel_i = plabel[idx, :, :].unsqueeze(0)
try:
result = encoder.decode_batch(ploc_i, plabel_i, 0.50, 200)[0]
except Exception as e:
print("Skipping idx {}, failed to decode with message {}, Skipping.".format(idx, e))
continue
htot, wtot = img_size[0][idx].item(), img_size[1][idx].item()
loc, label, prob = [r.cpu().numpy() for r in result]
for loc_, label_, prob_ in zip(loc, label, prob):
ret.append([img_id[idx], loc_[0] * wtot, \
loc_[1] * htot,
(loc_[2] - loc_[0]) * wtot,
(loc_[3] - loc_[1]) * htot,
prob_,
inv_map[label_]])
# Now we have all predictions from this rank, gather them all together
# if necessary
ret = np.array(ret).astype(np.float32)
# Multi-GPU eval
if args.distributed:
# NCCL backend means we can only operate on GPU tensors
ret_copy = torch.tensor(ret).cuda()
# Everyone exchanges the size of their results
ret_sizes = [torch.tensor(0).cuda() for _ in range(N_gpu)]
torch.cuda.synchronize()
torch.distributed.all_gather(ret_sizes, torch.tensor(ret_copy.shape[0]).cuda())
torch.cuda.synchronize()
# Get the maximum results size, as all tensors must be the same shape for
# the all_gather call we need to make
max_size = 0
sizes = []
for s in ret_sizes:
max_size = max(max_size, s.item())
sizes.append(s.item())
# Need to pad my output to max_size in order to use in all_gather
ret_pad = torch.cat([ret_copy, torch.zeros(max_size - ret_copy.shape[0], 7, dtype=torch.float32).cuda()])
# allocate storage for results from all other processes
other_ret = [torch.zeros(max_size, 7, dtype=torch.float32).cuda() for i in range(N_gpu)]
# Everyone exchanges (padded) results
torch.cuda.synchronize()
torch.distributed.all_gather(other_ret, ret_pad)
torch.cuda.synchronize()
# Now need to reconstruct the _actual_ results from the padded set using slices.
cat_tensors = []
for i in range(N_gpu):
cat_tensors.append(other_ret[i][:sizes[i]][:])
final_results = torch.cat(cat_tensors).cpu().numpy()
else:
# Otherwise full results are just our results
final_results = ret
if args.local_rank == 0:
print("")
print("Predicting Ended, total time: {:.2f} s".format(time.time() - start))
cocoDt = cocoGt.loadRes(final_results, use_ext=True)
E = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True)
E.evaluate()
E.accumulate()
if args.local_rank == 0:
E.summarize()
print("Current AP: {:.5f}".format(E.stats[0]))
else:
# fix for cocoeval indiscriminate prints
with redirect_stdout(io.StringIO()):
E.summarize()
# put your model in training mode back on
model.train()
    return E.stats[0]  # Average Precision (AP) @[ IoU=0.50:0.95 | area=all | maxDets=100 ]
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics | metrics | tf_example_parser | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto parser for data loading.
A parser to decode data containing serialized tensorflow.Example
protos into materialized tensors (numpy arrays).
"""
import numpy as np
from object_detection.core import data_parser
from object_detection.core import standard_fields as fields
class FloatParser(data_parser.DataToNumpyParser):
"""Tensorflow Example float parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].float_list.value,
            dtype=np.float64).transpose() if tf_example.features.feature[
self.field_name].HasField("float_list") else None
class StringParser(data_parser.DataToNumpyParser):
"""Tensorflow Example string parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return "".join(tf_example.features.feature[self.field_name]
.bytes_list.value) if tf_example.features.feature[
self.field_name].HasField("bytes_list") else None
class Int64Parser(data_parser.DataToNumpyParser):
"""Tensorflow Example int64 parser."""
def __init__(self, field_name):
self.field_name = field_name
def parse(self, tf_example):
return np.array(
tf_example.features.feature[self.field_name].int64_list.value,
dtype=np.int64).transpose() if tf_example.features.feature[
self.field_name].HasField("int64_list") else None
class BoundingBoxParser(data_parser.DataToNumpyParser):
"""Tensorflow Example bounding box parser."""
def __init__(self, xmin_field_name, ymin_field_name, xmax_field_name,
ymax_field_name):
self.field_names = [
ymin_field_name, xmin_field_name, ymax_field_name, xmax_field_name
]
def parse(self, tf_example):
result = []
parsed = True
for field_name in self.field_names:
result.append(tf_example.features.feature[field_name].float_list.value)
parsed &= (
tf_example.features.feature[field_name].HasField("float_list"))
return np.array(result).transpose() if parsed else None
class TfExampleDetectionAndGTParser(data_parser.DataToNumpyParser):
"""Tensorflow Example proto parser."""
def __init__(self):
self.items_to_handlers = {
fields.DetectionResultFields.key:
StringParser(fields.TfExampleFields.source_id),
# Object ground truth boxes and classes.
fields.InputDataFields.groundtruth_boxes: (BoundingBoxParser(
fields.TfExampleFields.object_bbox_xmin,
fields.TfExampleFields.object_bbox_ymin,
fields.TfExampleFields.object_bbox_xmax,
fields.TfExampleFields.object_bbox_ymax)),
fields.InputDataFields.groundtruth_classes: (
Int64Parser(fields.TfExampleFields.object_class_label)),
# Object detections.
fields.DetectionResultFields.detection_boxes: (BoundingBoxParser(
fields.TfExampleFields.detection_bbox_xmin,
fields.TfExampleFields.detection_bbox_ymin,
fields.TfExampleFields.detection_bbox_xmax,
fields.TfExampleFields.detection_bbox_ymax)),
fields.DetectionResultFields.detection_classes: (
Int64Parser(fields.TfExampleFields.detection_class_label)),
fields.DetectionResultFields.detection_scores: (
FloatParser(fields.TfExampleFields.detection_score)),
}
self.optional_items_to_handlers = {
fields.InputDataFields.groundtruth_difficult:
Int64Parser(fields.TfExampleFields.object_difficult),
fields.InputDataFields.groundtruth_group_of:
Int64Parser(fields.TfExampleFields.object_group_of),
fields.InputDataFields.groundtruth_image_classes:
Int64Parser(fields.TfExampleFields.image_class_label),
}
def parse(self, tf_example):
"""Parses tensorflow example and returns a tensor dictionary.
Args:
tf_example: a tf.Example object.
Returns:
A dictionary of the following numpy arrays:
fields.DetectionResultFields.source_id - string containing original image
id.
fields.InputDataFields.groundtruth_boxes - a numpy array containing
groundtruth boxes.
fields.InputDataFields.groundtruth_classes - a numpy array containing
groundtruth classes.
fields.InputDataFields.groundtruth_group_of - a numpy array containing
groundtruth group of flag (optional, None if not specified).
fields.InputDataFields.groundtruth_difficult - a numpy array containing
groundtruth difficult flag (optional, None if not specified).
fields.InputDataFields.groundtruth_image_classes - a numpy array
containing groundtruth image-level labels.
fields.DetectionResultFields.detection_boxes - a numpy array containing
detection boxes.
fields.DetectionResultFields.detection_classes - a numpy array containing
detection class labels.
fields.DetectionResultFields.detection_scores - a numpy array containing
detection scores.
Returns None if tf.Example was not parsed or non-optional fields were not
found.
"""
results_dict = {}
parsed = True
for key, parser in self.items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
parsed &= (results_dict[key] is not None)
for key, parser in self.optional_items_to_handlers.items():
results_dict[key] = parser.parse(tf_example)
return results_dict if parsed else None
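# Minimal usage sketch (record source is illustrative): the parser takes a
# tf.train.Example carrying both groundtruth and detection fields and returns a
# dict of numpy arrays, or None when a required field is missing.
#
#   parser = TfExampleDetectionAndGTParser()
#   example = tf.train.Example.FromString(serialized_record)
#   parsed = parser.parse(example)
#   if parsed is not None:
#     gt_boxes = parsed[fields.InputDataFields.groundtruth_boxes]
#     scores = parsed[fields.DetectionResultFields.detection_scores]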
|
PyTorch/LanguageModeling/BERT/triton/dist4l/runner | runner | start_NVIDIA-A30 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.dist4l.runner.__main__" \
--config-path "triton/dist4l/runner/config_NVIDIA-A30.yaml" \
--device 0 |
PyTorch/SpeechRecognition/Jasper/common | common | optimizers | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.optim import Optimizer
import math
def lr_policy(step, epoch, initial_lr, optimizer, steps_per_epoch, warmup_epochs,
hold_epochs, num_epochs=None, policy='linear', min_lr=1e-5,
exp_gamma=None):
"""
learning rate decay
Args:
initial_lr: base learning rate
step: current iteration number
N: total number of iterations over which learning rate is decayed
lr_steps: list of steps to apply exp_gamma
"""
warmup_steps = warmup_epochs * steps_per_epoch
hold_steps = hold_epochs * steps_per_epoch
if policy == 'legacy':
assert num_epochs is not None
tot_steps = num_epochs * steps_per_epoch
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = (((tot_steps - step)
/ (tot_steps - warmup_steps - hold_steps)) ** 2)
elif policy == 'exponential':
assert exp_gamma is not None
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = exp_gamma ** (epoch - warmup_epochs - hold_epochs)
else:
raise ValueError
new_lr = max(a * initial_lr, min_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
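# Minimal usage sketch (hyperparameters are illustrative): warm up for 2 epochs,
# hold for 1, then decay quadratically with the 'legacy' policy.
def _example_lr_policy():
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=1e-2)
    steps_per_epoch, num_epochs = 100, 10
    lrs = []
    for epoch in range(num_epochs):
        for step_in_epoch in range(steps_per_epoch):
            step = epoch * steps_per_epoch + step_in_epoch
            lr_policy(step, epoch, initial_lr=1e-2, optimizer=optimizer,
                      steps_per_epoch=steps_per_epoch, warmup_epochs=2,
                      hold_epochs=1, num_epochs=num_epochs, policy='legacy')
            lrs.append(optimizer.param_groups[0]['lr'])
    return lrs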
class AdamW(Optimizer):
"""Implements AdamW algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom, value=1), alpha=-step_size)
return loss
class Novograd(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8,
weight_decay=0, grad_averaging=False, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad)
super(Novograd, self).__init__(params, defaults)
def __setstate__(self, state):
super(Novograd, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Sparse gradients are not supported.')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(p.data, alpha=group['weight_decay'])
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.data.add_(exp_avg, alpha=-group['lr'])
return loss
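# Minimal usage sketch (model and hyperparameters are illustrative): both AdamW
# and Novograd follow the standard torch.optim.Optimizer interface.
def _example_novograd_step():
    model = torch.nn.Linear(16, 4)
    optimizer = Novograd(model.parameters(), lr=1e-3, betas=(0.95, 0.25),
                         weight_decay=1e-3)
    loss = model(torch.randn(8, 16)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()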
|
TensorFlow/Classification/ConvNets/model/layers | layers | dense | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
__all__ = ['dense']
def dense(
inputs,
units,
use_bias=True,
trainable=True,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
bias_initializer=tf.zeros_initializer()
):
net = tf.layers.dense(
inputs,
units=units,
activation=None,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
trainable=trainable
)
return net
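# A minimal usage sketch (assumes the TF1-style graph mode used throughout this
# repository; the input width and unit count below are arbitrary examples).
if __name__ == "__main__":
    inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, 128], name="inputs")
    logits = dense(inputs, units=10, use_bias=True)
    print(logits.shape)  # static shape: (None, 10)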
|
TensorFlow/LanguageModeling/BERT | BERT | optimization_test | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optimization
import tensorflow as tf
class OptimizationTest(tf.test.TestCase):
def test_adam(self):
with self.test_session() as sess:
w = tf.get_variable(
"w",
shape=[3],
initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
x = tf.constant([0.4, 0.2, -0.5])
loss = tf.reduce_mean(tf.square(x - w))
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
for _ in range(100):
sess.run(train_op)
w_np = sess.run(w)
self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/box_coders | box_coders | square_box_coder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.box_coder.square_box_coder."""
import tensorflow as tf
from object_detection.box_coders import square_box_coder
from object_detection.core import box_list
class SquareBoxCoderTest(tf.test.TestCase):
def test_correct_relative_codes_with_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = None
expected_rel_codes = [[-0.790569, -0.263523, -0.293893],
[-0.068041, -0.272166, -0.89588]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_non_default_scale(self):
boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
scale_factors = [2, 3, 4]
expected_rel_codes = [[-1.581139, -0.790569, -1.175573],
[-0.136083, -0.816497, -3.583519]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_relative_codes_with_small_width(self):
boxes = [[10.0, 10.0, 10.0000001, 20.0]]
anchors = [[15.0, 12.0, 30.0, 18.0]]
scale_factors = None
expected_rel_codes = [[-1.317616, 0., -20.670586]]
boxes = box_list.BoxList(tf.constant(boxes))
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
rel_codes = coder.encode(boxes, anchors)
with self.test_session() as sess:
(rel_codes_out,) = sess.run([rel_codes])
self.assertAllClose(rel_codes_out, expected_rel_codes)
def test_correct_boxes_with_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[-0.5, -0.416666, -0.405465],
[-0.083333, -0.222222, -0.693147]]
scale_factors = None
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)
def test_correct_boxes_with_non_default_scale(self):
anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
rel_codes = [[-1., -1.25, -1.62186], [-0.166667, -0.666667, -2.772588]]
scale_factors = [2, 3, 4]
expected_boxes = [[14.594306, 7.884875, 20.918861, 14.209432],
[0.155051, 0.102989, 0.522474, 0.470412]]
anchors = box_list.BoxList(tf.constant(anchors))
coder = square_box_coder.SquareBoxCoder(scale_factors=scale_factors)
boxes = coder.decode(rel_codes, anchors)
with self.test_session() as sess:
(boxes_out,) = sess.run([boxes.get()])
self.assertAllClose(boxes_out, expected_boxes)
if __name__ == '__main__':
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | criterions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class QuantileLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.register_buffer('q', torch.tensor(config.quantiles))
def forward(self, predictions, targets):
diff = predictions - targets
ql = (1-self.q)*F.relu(diff) + self.q*F.relu(-diff)
losses = ql.view(-1, ql.shape[-1]).mean(0)
return losses
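# A minimal usage sketch (the quantile values and tensor shapes below are
# illustrative assumptions; `config` only needs to expose a `quantiles` list).
if __name__ == "__main__":
    from types import SimpleNamespace
    criterion = QuantileLoss(SimpleNamespace(quantiles=[0.1, 0.5, 0.9]))
    predictions = torch.randn(8, 24, 3)  # one prediction per quantile
    targets = torch.randn(8, 24, 1)      # broadcast across the quantile axis
    per_quantile_loss = criterion(predictions, targets)  # shape: (3,)
    print(per_quantile_loss, per_quantile_loss.sum())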
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | preparer | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
Clean logs from previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
            triton_container_image: Triton Inference Server container image used for tests
            logs_dir: Path to the directory where experiment logs are stored
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = dict()
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = list()
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
logs_dir=logs_dir,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = list()
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
        Prepare experiment data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
parameters = {key.lower(): value for key, value in configuration.parameters.items()}
results_mapped = dict()
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = dict()
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=parameters,
stages=stages_mapped,
results=results_mapped,
)
return experiment
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph | graph | graph | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import snap
from syngen.utils.types import MetaData
def safeSNAP(f):
def wrapper(*args, **kwargs):
graph = args[0]
graph.maybe_load_snap()
return f(*args, **kwargs)
return wrapper
class Graph(object):
def __init__(self, path=None, name=None, load_eagerly=False, is_directed=False, _snap_graph=None):
self.path = path
self.name = name
self.is_directed = is_directed
self.snapGraph = _snap_graph
if load_eagerly:
self.maybe_load_snap()
def maybe_load_snap(self):
if not self.snapGraph:
graph_type = snap.TNGraph if self.is_directed else snap.TUNGraph
self.snapGraph = snap.LoadConnList(graph_type, self.path)
@staticmethod
def instantiate_from_feature_spec(feature_spec, edge_name, graph_name=None):
edge_info = feature_spec.get_edge_info(edge_name)
is_bipartite = edge_info[MetaData.SRC_NODE_TYPE] != edge_info[MetaData.DST_NODE_TYPE]
is_directed = edge_info[MetaData.DIRECTED]
graph_type = snap.TNGraph if is_directed else snap.TUNGraph
struct_data = feature_spec.get_structural_data(edge_name)
if is_bipartite:
num_src_nodes = feature_spec.get_node_info(edge_info[MetaData.SRC_NODE_TYPE])[MetaData.COUNT]
num_dst_nodes = feature_spec.get_node_info(edge_info[MetaData.DST_NODE_TYPE])[MetaData.COUNT]
num_nodes = num_src_nodes + num_dst_nodes
else:
num_nodes = feature_spec.get_node_info(edge_info[MetaData.SRC_NODE_TYPE])[MetaData.COUNT]
snap_graph = graph_type.New(num_nodes, len(struct_data))
for i in range(num_nodes):
snap_graph.AddNode(i)
for e in struct_data:
snap_graph.AddEdge(int(e[0]), int(e[1]))
return Graph(_snap_graph=snap_graph, is_directed=is_directed, name=graph_name)
@safeSNAP
def edge_count(self):
return self.snapGraph.GetEdges()
@safeSNAP
def node_count(self):
return self.snapGraph.GetNodes()
@safeSNAP
def get_edges(self):
return [
(EI.GetSrcNId(), EI.GetDstNId()) for EI in self.snapGraph.Edges()
]
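# A minimal usage sketch ("edges.txt" is an assumed connection-list file in the
# format expected by snap.LoadConnList; it is not shipped with this module).
if __name__ == "__main__":
    graph = Graph(path="edges.txt", name="example", is_directed=False)
    print("nodes:", graph.node_count(), "edges:", graph.edge_count())
    print("first edges:", graph.get_edges()[:5])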
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | transformer_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based transformer block layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import transformer
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class TransformerLayerTest(keras_parameterized.TestCase):
def test_layer_creation(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_mask(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
def test_layer_creation_with_incorrect_mask_fails(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length - 3))
with self.assertRaisesRegex(ValueError, 'When passing a mask tensor.*'):
_ = test_layer([data_tensor, mask_tensor])
def test_layer_invocation(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
def test_layer_invocation_with_mask(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_layer_invocation_with_float16_dtype(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
dtype='float16')
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(
shape=(sequence_length, width), dtype=tf.float16)
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width))).astype(np.float16)
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
def test_transform_with_initializer(self):
test_layer = transformer.Transformer(
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
sequence_length = 21
width = 80
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Classification/GPUNet/triton/175ms/runner | runner | start_NVIDIA-DGX-A100-(1x-A100-80GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.175ms.runner.__main__" \
--config-path "triton/175ms/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
--device 0 |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | postNetBuilder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_POSTNETBUILDER_H
#define TT2I_POSTNETBUILDER_H
#include "IModelImporter.h"
#include "trtPtr.h"
#include <string>
namespace nvinfer1
{
class INetworkDefinition;
class IBuilder;
} // namespace nvinfer1
namespace tts
{
class PostNetBuilder
{
public:
/**
* @brief Create a new PostNetBuilder.
*
* @param numChannels The number of channels for the postnet.
* @param maxChunkSize The size of the input chunk.
* @param numDimensions The number of dimensions internally.
*/
PostNetBuilder(const int numChannels, const int maxChunkSize, const int numDimensions);
/**
* @brief Build the ICudaEngine for the PostNet.
*
* @param builder The engine builder.
* @param importer The model weight importer.
* @param maxBatchSize The maximum batch size to support.
* @param useFP16 Whether or not to allow FP16 usage in the build.
*
* @return The built engine.
*/
TRTPtr<nvinfer1::ICudaEngine> build(
nvinfer1::IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const bool useFP16);
private:
int mNumChannels;
int mMaxChunkSize;
int mNumDimensions;
};
} // namespace tts
#endif
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | mscoco_label_map | item {
name: "/m/01g317"
id: 1
display_name: "person"
}
item {
name: "/m/0199g"
id: 2
display_name: "bicycle"
}
item {
name: "/m/0k4j"
id: 3
display_name: "car"
}
item {
name: "/m/04_sv"
id: 4
display_name: "motorcycle"
}
item {
name: "/m/05czz6l"
id: 5
display_name: "airplane"
}
item {
name: "/m/01bjv"
id: 6
display_name: "bus"
}
item {
name: "/m/07jdr"
id: 7
display_name: "train"
}
item {
name: "/m/07r04"
id: 8
display_name: "truck"
}
item {
name: "/m/019jd"
id: 9
display_name: "boat"
}
item {
name: "/m/015qff"
id: 10
display_name: "traffic light"
}
item {
name: "/m/01pns0"
id: 11
display_name: "fire hydrant"
}
item {
name: "/m/02pv19"
id: 13
display_name: "stop sign"
}
item {
name: "/m/015qbp"
id: 14
display_name: "parking meter"
}
item {
name: "/m/0cvnqh"
id: 15
display_name: "bench"
}
item {
name: "/m/015p6"
id: 16
display_name: "bird"
}
item {
name: "/m/01yrx"
id: 17
display_name: "cat"
}
item {
name: "/m/0bt9lr"
id: 18
display_name: "dog"
}
item {
name: "/m/03k3r"
id: 19
display_name: "horse"
}
item {
name: "/m/07bgp"
id: 20
display_name: "sheep"
}
item {
name: "/m/01xq0k1"
id: 21
display_name: "cow"
}
item {
name: "/m/0bwd_0j"
id: 22
display_name: "elephant"
}
item {
name: "/m/01dws"
id: 23
display_name: "bear"
}
item {
name: "/m/0898b"
id: 24
display_name: "zebra"
}
item {
name: "/m/03bk1"
id: 25
display_name: "giraffe"
}
item {
name: "/m/01940j"
id: 27
display_name: "backpack"
}
item {
name: "/m/0hnnb"
id: 28
display_name: "umbrella"
}
item {
name: "/m/080hkjn"
id: 31
display_name: "handbag"
}
item {
name: "/m/01rkbr"
id: 32
display_name: "tie"
}
item {
name: "/m/01s55n"
id: 33
display_name: "suitcase"
}
item {
name: "/m/02wmf"
id: 34
display_name: "frisbee"
}
item {
name: "/m/071p9"
id: 35
display_name: "skis"
}
item {
name: "/m/06__v"
id: 36
display_name: "snowboard"
}
item {
name: "/m/018xm"
id: 37
display_name: "sports ball"
}
item {
name: "/m/02zt3"
id: 38
display_name: "kite"
}
item {
name: "/m/03g8mr"
id: 39
display_name: "baseball bat"
}
item {
name: "/m/03grzl"
id: 40
display_name: "baseball glove"
}
item {
name: "/m/06_fw"
id: 41
display_name: "skateboard"
}
item {
name: "/m/019w40"
id: 42
display_name: "surfboard"
}
item {
name: "/m/0dv9c"
id: 43
display_name: "tennis racket"
}
item {
name: "/m/04dr76w"
id: 44
display_name: "bottle"
}
item {
name: "/m/09tvcd"
id: 46
display_name: "wine glass"
}
item {
name: "/m/08gqpm"
id: 47
display_name: "cup"
}
item {
name: "/m/0dt3t"
id: 48
display_name: "fork"
}
item {
name: "/m/04ctx"
id: 49
display_name: "knife"
}
item {
name: "/m/0cmx8"
id: 50
display_name: "spoon"
}
item {
name: "/m/04kkgm"
id: 51
display_name: "bowl"
}
item {
name: "/m/09qck"
id: 52
display_name: "banana"
}
item {
name: "/m/014j1m"
id: 53
display_name: "apple"
}
item {
name: "/m/0l515"
id: 54
display_name: "sandwich"
}
item {
name: "/m/0cyhj_"
id: 55
display_name: "orange"
}
item {
name: "/m/0hkxq"
id: 56
display_name: "broccoli"
}
item {
name: "/m/0fj52s"
id: 57
display_name: "carrot"
}
item {
name: "/m/01b9xk"
id: 58
display_name: "hot dog"
}
item {
name: "/m/0663v"
id: 59
display_name: "pizza"
}
item {
name: "/m/0jy4k"
id: 60
display_name: "donut"
}
item {
name: "/m/0fszt"
id: 61
display_name: "cake"
}
item {
name: "/m/01mzpv"
id: 62
display_name: "chair"
}
item {
name: "/m/02crq1"
id: 63
display_name: "couch"
}
item {
name: "/m/03fp41"
id: 64
display_name: "potted plant"
}
item {
name: "/m/03ssj5"
id: 65
display_name: "bed"
}
item {
name: "/m/04bcr3"
id: 67
display_name: "dining table"
}
item {
name: "/m/09g1w"
id: 70
display_name: "toilet"
}
item {
name: "/m/07c52"
id: 72
display_name: "tv"
}
item {
name: "/m/01c648"
id: 73
display_name: "laptop"
}
item {
name: "/m/020lf"
id: 74
display_name: "mouse"
}
item {
name: "/m/0qjjc"
id: 75
display_name: "remote"
}
item {
name: "/m/01m2v"
id: 76
display_name: "keyboard"
}
item {
name: "/m/050k8"
id: 77
display_name: "cell phone"
}
item {
name: "/m/0fx9l"
id: 78
display_name: "microwave"
}
item {
name: "/m/029bxz"
id: 79
display_name: "oven"
}
item {
name: "/m/01k6s3"
id: 80
display_name: "toaster"
}
item {
name: "/m/0130jx"
id: 81
display_name: "sink"
}
item {
name: "/m/040b_t"
id: 82
display_name: "refrigerator"
}
item {
name: "/m/0bt_c3"
id: 84
display_name: "book"
}
item {
name: "/m/01x3z"
id: 85
display_name: "clock"
}
item {
name: "/m/02s195"
id: 86
display_name: "vase"
}
item {
name: "/m/01lsmm"
id: 87
display_name: "scissors"
}
item {
name: "/m/0kmg4"
id: 88
display_name: "teddy bear"
}
item {
name: "/m/03wvsk"
id: 89
display_name: "hair drier"
}
item {
name: "/m/012xff"
id: 90
display_name: "toothbrush"
}
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | mscoco_minival_ids | 25096
251824
35313
546011
524186
205866
511403
313916
47471
258628
233560
576017
404517
410056
178690
248980
511724
429718
163076
244111
126766
313182
191981
139992
325237
248129
214519
175438
493321
174103
563762
536795
289960
473720
515540
292118
360851
267175
532876
171613
581415
259819
441841
381682
58157
4980
473929
70626
93773
283412
36765
495020
278401
329307
192810
491784
506416
225495
553747
86442
242208
132686
385877
290248
525705
5476
486521
332512
138556
348083
284375
40018
296994
38685
432429
183407
434358
472164
530494
570693
193401
392612
98872
445766
532209
98322
285114
267725
51605
314812
91105
535506
540264
375341
449828
277659
68933
76873
217554
213592
190776
516224
474479
343599
578813
128669
546292
475365
377626
128833
427091
547227
11742
80213
462241
374574
121572
29151
13892
262394
303667
198724
7320
448492
419080
460379
483965
556516
139181
1103
308715
207507
213827
216083
445597
240275
379585
116389
138124
559051
326898
419386
503660
519460
23893
24458
518109
462982
151492
514254
2477
147165
570394
548766
250083
364341
351967
386277
328084
511299
499349
315501
234965
428562
219771
288150
136021
168619
298316
75118
189752
243857
296222
554002
533628
384596
202981
498350
391463
183991
528062
451084
7899
408534
329030
318566
22492
361285
226973
213356
417265
105622
161169
261487
167477
233370
142999
256713
305833
103579
352538
135763
392144
61181
200302
456908
286858
179850
488075
174511
194755
317822
2302
304596
172556
548275
341678
55299
134760
352936
545129
377012
141328
103757
552837
28246
125167
328745
278760
337133
403389
146825
502558
265916
428985
492041
113403
372037
306103
287574
187495
479805
336309
162043
95899
43133
464248
149115
247438
74030
130645
282841
127092
101172
536743
179642
58133
49667
170605
11347
365277
201970
292663
217219
463226
41924
281102
357816
490878
100343
525058
133503
416145
29341
415413
125527
507951
262609
240210
581781
345137
526342
268641
328777
32001
137538
39115
415958
6771
421865
64909
383601
206907
420840
370980
28452
571893
153520
185890
392991
547013
257359
279879
478614
131919
40937
22874
173375
106344
44801
205401
312870
400886
351530
344013
173500
470423
396729
402499
276585
377097
367619
518908
263866
332292
67805
152211
515025
221350
525247
78490
504342
95908
82668
256199
220270
552065
242379
84866
152281
228464
223122
67537
456968
368349
101985
14681
543551
107558
372009
99054
126540
86877
492785
482585
571564
501116
296871
20395
181518
568041
121154
56187
190018
97156
310325
393274
214574
243222
289949
452121
150508
341752
310757
24040
228551
335589
12020
529597
459884
344888
229713
51948
370929
552061
261072
120070
332067
263014
158993
451714
397327
20965
414340
574946
370266
487534
492246
264771
73702
43997
235124
301093
400048
77681
58472
331386
13783
242513
419158
59325
383033
393258
529041
249276
182775
351793
9727
334069
566771
539355
38662
423617
47559
120592
508303
462565
47916
218208
182362
562101
441442
71239
395378
522637
25603
484450
872
171483
527248
323155
240754
15032
419144
313214
250917
333430
242757
221914
283190
194297
228506
550691
172513
312192
530619
113867
323552
374115
35435
160239
62877
441873
196574
62858
557114
427612
242869
356733
304828
24880
490509
407083
457877
402788
536416
385912
544121
500389
451102
12120
483476
70987
482799
542549
49236
424258
435783
182366
438093
501824
232845
53965
223198
288933
450458
285664
196484
408930
519815
290981
398567
315792
490683
257136
75611
302498
332153
82293
416911
558608
564659
536195
370260
57904
527270
6593
145620
551650
470832
515785
251404
287331
150788
334006
266117
10039
579158
328397
468351
550400
31745
405970
16761
323515
459598
558457
570736
476939
472610
72155
112517
13659
530905
458768
43486
560893
493174
31217
262736
412204
142722
151231
480643
197245
398666
444869
110999
191724
479057
492420
170638
277329
301908
395644
537611
141887
47149
403432
34818
372495
67994
337497
478586
249815
533462
281032
289941
151911
271215
407868
360700
508582
103873
353658
369081
406403
331692
26430
105655
572630
37181
91336
484587
318284
113019
33055
25293
229324
374052
384111
213951
315195
319283
539453
17655
308974
326243
539436
417876
526940
356347
221932
73753
292648
262284
304924
558587
374858
253518
311744
539636
40924
136624
334305
365997
63355
191226
526732
367128
575198
500657
50637
17182
424792
565353
563040
383494
74458
155142
197125
223857
428241
440830
371289
437303
330449
93771
82715
499631
381257
563951
192834
528600
404273
270554
208053
188613
484760
432016
129800
91756
523097
317018
487282
444913
159500
126822
540564
105812
560756
306099
471226
123842
513219
154877
497034
283928
564003
238602
194780
462728
558640
524373
455624
3690
560367
316351
455772
223777
161517
243034
250440
239975
441008
324715
152106
246973
462805
296521
412767
530913
370165
292526
107244
217440
330204
220176
577735
197022
127451
518701
212322
204887
27696
348474
119233
282804
230040
425690
409241
296825
296353
375909
123136
573891
338256
198247
373375
151051
500084
557596
120478
44989
283380
149005
522065
626
17198
309633
524245
291589
322714
455847
248468
371948
444928
20438
481670
147195
95022
548159
553165
395324
391371
86884
561121
219737
38875
338159
377881
185472
359277
114861
378048
126226
10217
320246
15827
178236
370279
352978
408101
77615
337044
223714
20796
352445
263834
156704
377867
119402
399567
1180
257941
560675
390471
209290
258382
466339
56437
195042
384230
203214
36077
283038
38323
158770
532381
395903
375461
397857
326798
371699
369503
495626
464328
462211
397719
434089
424793
476770
531852
303538
525849
480917
419653
265063
48956
5184
279149
396727
374266
124429
36124
240213
147556
339512
577182
288599
257169
178254
393869
122314
28713
48133
540681
100974
368459
500110
73634
460982
203878
578344
443602
502012
399666
103603
22090
257529
176328
536656
408873
116881
460972
33835
460781
51223
46463
89395
407646
337453
461715
16257
426987
234889
3125
165643
517472
451435
206800
112128
331236
163306
94185
498716
532732
146509
458567
153832
105996
353398
546976
283060
247624
110048
243491
154798
543600
149962
355256
352900
203081
372203
284605
516244
190494
150301
326082
64146
402858
413538
399510
460251
94336
458721
57345
424162
423508
69356
567220
509786
37038
111535
341318
372067
358120
244909
180653
39852
438560
357041
67065
51928
171717
520430
552395
431355
528084
20913
309610
262323
573784
449485
154846
283438
430871
199578
516318
563912
348483
485613
143440
94922
168817
74457
45830
66297
514173
99186
296236
230903
452312
476444
568981
100811
237350
194724
453622
49559
270609
113701
415393
92173
137004
188795
148280
448114
575964
163155
518719
219329
214247
363927
65357
87617
552612
457817
124796
47740
560463
513968
273637
354212
95959
261061
307265
316237
191342
463272
169273
396518
93261
572733
407386
202658
446497
420852
229274
432724
34900
352533
49891
66144
146831
467484
97988
561647
301155
507421
173217
577584
451940
99927
350639
178941
485155
175948
360673
92963
361321
48739
577310
517795
93405
506458
394681
167920
16995
519573
270532
527750
563403
494608
557780
178691
8676
186927
550173
361656
575911
281315
534377
57570
340894
37624
143103
538243
425077
376545
108129
170974
7522
408906
264279
79415
344025
186797
234349
226472
123639
225177
237984
38714
223671
358247
152465
521405
453722
361111
557117
235832
309341
268469
108353
532531
357279
537280
437618
122953
7088
36693
127659
431901
57244
567565
568111
202926
504516
555685
322369
347620
110231
568982
295340
529798
300341
158160
73588
119476
387216
154994
259755
211282
433971
263588
299468
570138
123017
355106
540172
406215
8401
548844
161820
396432
495348
222407
53123
491556
108130
440617
448309
22596
346841
213829
135076
56326
233139
487418
227326
137763
383389
47882
207797
167452
112065
150703
421109
171753
158279
240800
66821
152886
163640
475466
301799
106712
470885
536370
420389
396768
281950
18903
357529
33650
168243
201004
389295
557150
185327
181256
557396
182025
61564
301928
332455
199403
18444
177452
204206
38465
215906
153103
445019
324527
299207
429281
574675
157067
241269
100850
502818
576566
296775
873
280363
355240
383445
286182
67327
422778
494855
337246
266853
47516
381991
44081
403862
381430
370798
173383
387173
22396
484066
349414
262235
492814
65238
209420
336276
453328
407286
420490
360328
158440
398534
489475
477389
297108
69750
507833
198992
99736
546444
514914
482574
54355
63478
191693
61684
412914
267408
424641
56872
318080
30290
33441
199310
337403
26731
453390
506137
188945
185950
239843
357944
290570
523637
551952
513397
357870
523517
277048
259879
186991
521943
21900
281074
187194
526723
568147
513037
177338
243831
203488
208494
188460
289943
399177
404668
160761
271143
76087
478922
440045
449432
61025
331138
227019
147577
548337
444294
458663
236837
6854
444926
484816
516641
397863
188534
64822
213453
66561
43218
514901
322844
498453
488788
391656
298994
64088
464706
193720
199017
186427
15278
350386
342335
372024
550939
35594
381382
235902
26630
213765
550001
129706
577149
353096
376891
28499
427041
314965
231163
5728
347836
184388
27476
284860
476872
301317
99546
147653
529515
311922
20777
2613
59463
430670
560744
60677
332087
296724
353321
103306
363887
76431
423058
120340
119452
6723
462327
163127
402723
489382
183181
107656
375409
355228
430762
512468
409125
270544
559113
495388
529434
38355
422025
379667
131386
183409
573536
581317
425404
350084
472
28532
329717
230220
187196
484166
97434
224595
87483
516998
314876
32610
514586
344816
394418
402330
305993
371497
315790
294908
207431
561014
26584
368671
374990
54747
47571
449424
283761
84735
522127
120473
524656
479659
131627
450959
153300
580908
207785
49115
284991
96505
278306
291655
1404
489304
557459
37740
157465
390475
119166
33871
247428
75905
20779
65035
333556
375415
383676
505243
87327
16451
287235
70190
245067
417520
229234
183786
333018
554156
198915
108021
128262
412443
242543
555050
436511
445233
207886
156397
526257
521357
413043
427189
401614
94823
351130
105945
182314
305879
526197
64409
496800
236461
138175
43816
185904
345711
72536
526737
360400
556537
426053
59044
28290
222548
434915
418623
246454
111801
12448
427133
459117
11262
169045
469996
304390
513096
322822
196371
504977
395364
243950
216218
417217
106736
58194
504101
478522
379314
30432
207027
297146
91844
176031
98287
278095
196053
343692
523137
220224
349485
376193
407067
185781
37871
336464
46331
44244
80274
170147
361106
468499
537864
467457
267343
291528
287828
555648
388284
576085
531973
350122
422253
509811
78093
410019
133090
581205
343976
9007
92478
450674
486306
503978
46378
335578
404071
225558
217923
406217
138054
575815
234990
336257
159240
399516
226408
531126
138599
61693
89861
29504
163296
477906
48419
25595
195594
97592
392555
203849
139248
245651
275755
245426
127279
521359
517623
235747
475906
11198
336101
70134
505447
218996
30080
484457
120441
575643
132703
197915
505576
90956
99741
517819
240918
150834
207306
132682
88250
213599
462584
413321
361521
496081
410583
440027
417284
397069
280498
473171
129739
279774
29370
518899
509867
85556
434930
280710
55077
348793
157756
281111
190689
281447
502854
232894
268742
199553
220808
137330
256903
116017
466416
41635
110906
340934
557501
146767
517617
487159
1561
417281
489014
292463
113533
412247
263973
515444
343561
310200
293804
225867
150320
183914
9707
89999
177842
296524
287829
68300
363654
465986
159969
313948
522779
219820
198352
12959
266727
8016
175804
497867
307892
287527
309638
205854
114119
23023
322586
383341
134198
553522
70426
329138
105367
175597
187791
17944
366611
93493
242422
41842
558840
32203
19667
124297
383726
252625
234794
498228
102906
287967
69021
51326
243896
509423
440124
122582
344325
34455
442478
23587
236904
185633
349841
44294
112568
186296
71914
3837
135486
223747
557517
385181
265313
404263
26564
516867
497096
332351
345139
444304
510877
356387
561214
311471
408789
561729
291380
174671
45710
435136
388858
361693
50811
531134
573605
340175
534988
382671
327047
348400
547137
401037
490711
499266
236370
449075
334015
107234
232315
462953
252048
186822
410168
28994
45550
453626
417957
468577
106338
391684
375143
217622
357903
347648
142182
213843
299148
352587
436676
161875
144655
304741
235017
181799
211042
335507
553731
412531
229740
437129
423830
561806
337666
52016
138057
70254
494393
73119
262425
565395
305329
489611
377080
569450
549766
332940
235302
53893
203781
38449
114870
18699
396338
449839
423613
379767
369594
375812
359219
229311
291675
224907
416885
32964
573406
17282
103375
81860
576886
461334
35672
243442
217269
445055
211112
455675
412384
88967
550643
24223
504074
9275
155546
329542
172658
331600
315492
194208
162867
324614
432017
140860
157944
406616
486079
361172
258346
494140
315384
451014
242619
413684
386187
408501
121089
343603
232538
558671
551596
32992
406647
435260
11156
40896
175382
110560
252968
189694
63154
564816
72004
164788
434583
453104
111878
268484
290768
473215
450620
32673
277479
529917
315868
562419
378347
398637
84097
120527
134193
431472
400238
86426
208830
524535
22213
516813
526044
386193
246672
386739
559252
153344
236123
246074
323615
92644
408621
323231
499940
296105
578902
150098
145015
131431
318618
68409
497928
362520
467755
112702
163219
277289
192362
497674
525439
56267
465868
407570
551608
345211
179653
55295
97315
534041
505822
411082
132375
25378
272008
536605
123511
148737
577712
493751
29587
468297
528458
491058
558976
181421
209685
147545
486964
570516
168662
19446
395997
242911
232511
317035
354527
5961
513793
124390
370123
113397
195790
252813
326919
432414
409239
458221
115667
212239
279279
375554
546622
317188
260818
286021
377111
209868
243148
132037
560624
459721
193498
22623
254164
112841
383470
62692
227940
471335
44858
213649
179898
102837
474078
44478
256197
309492
182923
421139
275695
104965
480780
449749
76513
578591
336695
247474
320490
246105
53183
485740
575823
510735
290741
37017
348708
279784
453634
567644
434192
482719
435324
544299
106896
569926
301574
492885
103462
487151
513585
219647
303685
459645
76292
188579
154883
207728
425074
310493
27221
371694
119404
399665
273556
454577
580698
267664
295769
423740
22461
22667
508443
390401
369997
524627
193349
132223
576743
130586
487741
107542
501420
520109
308156
540581
231362
86471
472930
351133
463605
575577
159842
39504
223020
63525
298627
139883
375205
303549
16838
495680
408112
394474
188044
472143
463751
31481
378139
190853
442614
172006
140270
133051
178028
495090
88455
13232
46323
346275
425905
487013
433136
514402
521906
4157
61418
567205
213351
304008
296492
506561
408120
415961
323186
480379
349199
201918
135023
456483
136173
237917
4972
99081
331569
150007
36450
93400
487461
203629
218093
487181
113935
139512
210981
358883
47419
248382
80357
462663
83097
26159
80429
283055
452676
50159
12326
29430
303264
158122
569070
52925
534876
46975
426376
170293
434417
235517
218476
445008
482774
305632
116848
557252
229270
453485
382214
54759
59171
193328
17152
238071
148531
409725
75434
65358
473057
415408
579415
48636
269606
298784
162799
356400
326854
24601
66499
340247
20992
190218
548464
122203
405306
495376
536028
5713
206831
9395
503939
194440
474253
395849
165141
204935
412621
402922
87141
570664
202622
137362
221737
78947
112129
341957
169562
164780
360216
107641
415015
444955
559102
123070
176592
309366
116461
222075
530470
214363
414487
471567
292123
370210
364243
510254
396350
141524
220310
398604
145436
392476
17482
78032
336171
130812
489743
346638
418854
139072
263860
458240
383443
337533
182334
535608
517946
489924
308117
129945
59973
538364
513458
449433
25165
335851
487688
153834
347612
349689
443688
486008
479149
442286
61108
315338
511546
506444
775
121839
291412
497626
387223
367095
557896
196118
530652
447991
215622
232160
296731
272273
473415
364705
235790
479950
141278
547903
66523
353989
121875
237735
100083
348941
288983
390083
168248
120776
489764
219135
551713
256035
309005
112493
579759
114972
458992
295768
158497
309696
363844
507966
313491
280779
327130
292901
127761
183843
456521
164475
224281
443713
72514
567383
476215
565650
17708
474471
248334
196313
164759
212453
319024
332916
35436
113139
172716
7570
161609
144534
137475
561411
45844
332027
36990
190160
421231
283210
365611
511407
400887
485071
481214
347203
153506
397403
229599
357322
76034
101189
567444
92363
526767
218811
362812
339120
579696
399269
10705
549012
410428
105623
535307
419235
119911
236604
515779
188173
66397
549119
478742
256180
128224
440539
112818
315434
97513
171970
433483
226008
83217
424548
343753
350334
479280
208808
43266
399893
444386
47687
499093
565269
465835
167486
433460
169872
299640
158466
241373
50576
161567
73560
349804
181745
352684
450357
532693
88335
256518
94926
541197
14629
276149
539439
498738
25654
291330
146465
160190
513064
75748
499007
164464
134042
422416
543315
34056
303197
394801
293071
44964
529083
414522
331180
227599
581040
382850
159898
176841
205352
540782
406591
184499
14380
350230
458175
528786
314935
111086
2191
20371
337042
558371
296907
539937
511463
574856
87864
403817
152598
169712
533227
173545
478862
19455
258433
373440
460229
525682
176857
525050
277025
156416
206784
415179
183204
210374
312868
514366
65208
376342
515792
383066
85247
119132
338007
88748
206705
495808
532164
150686
35474
207860
111165
391199
346011
537721
11390
487482
360983
400347
92795
347506
324322
371958
101280
222842
563604
210299
150616
96351
330455
273551
228749
248051
495252
372265
52664
191874
157416
446428
136681
1228
321811
93791
477867
192520
157124
40620
200541
103904
329494
60093
112573
489125
513115
322968
561619
74309
572462
248252
375376
217312
243213
79878
452218
349754
554291
434043
460373
452591
567787
504711
196007
511153
312416
296056
308849
203667
253223
331230
465545
363048
69392
301506
216198
147979
6005
381870
56983
320972
144122
210855
151480
299288
462486
103931
321079
4134
239861
540006
413805
221222
198943
450790
380597
388298
58737
246197
160726
398554
513946
222235
323851
364703
125643
169800
445662
223764
575372
489207
559474
7155
453819
402720
102355
415076
287436
35705
111076
395865
310862
570834
54728
215778
80053
35148
350488
524140
190097
36661
302110
96884
383397
245462
446958
138937
424712
561814
276964
148034
411068
357824
103257
322149
508899
580294
214386
114419
271429
168260
209835
573072
252269
31980
161308
281508
192714
247599
188948
180563
419601
233660
154804
311846
181499
5535
175082
531018
412338
166995
441411
427820
516846
287366
67959
271266
330845
74209
508167
542699
66485
453756
158412
443784
118097
265050
29074
152623
532493
292988
530384
192660
502336
472648
151657
351626
241010
115070
268356
539557
304698
251140
497158
527445
385428
179200
512394
184978
141910
36311
579457
19129
424960
181714
126216
512911
488360
379533
337551
325410
364587
468885
211107
90062
500446
105960
451951
431431
134178
164548
173826
373988
15157
3091
393557
380011
75372
37403
209995
493610
315899
353299
355040
547000
86133
58174
377326
510230
480583
158588
432529
311206
127626
239980
166340
104185
405174
507211
542782
448078
253477
542694
567308
214853
288824
283268
480757
503200
221089
112388
171539
124452
224200
206362
428754
256192
119414
351620
330050
547504
216398
94261
19916
163242
432588
143824
361103
271138
260150
313627
141086
308263
388453
153217
372794
514787
251910
351335
92683
465836
18442
404128
208476
47873
303219
201622
367489
32760
436174
401926
338419
45248
328464
312216
156282
315702
300701
345401
515350
29094
284296
466449
351057
110672
364853
10014
415828
397522
451412
433124
158277
93476
183387
109889
223326
105547
530061
256301
526778
80974
86650
45835
202154
92678
315991
423919
455044
491168
272253
146627
285349
86001
44171
162332
257328
432820
519275
380639
269436
236016
543215
346752
575970
423498
136926
195648
126634
133078
138656
490012
122388
195165
434900
533625
504167
333697
216576
538775
125072
391154
545007
150292
566717
367362
490991
356623
141271
402795
516786
39499
536716
293324
212853
276381
57124
325992
394659
452178
117674
461172
518586
497021
462345
526570
17328
202928
62566
411277
256983
49473
211206
398031
277955
531178
453959
27946
252844
30273
536933
500298
229111
7977
27642
303726
79927
110313
527691
442205
33345
365851
233236
239157
409221
400803
32947
422516
359727
215872
559454
289716
450247
57827
312298
530383
260048
35857
224222
299533
13296
325907
117869
54088
391011
340478
205344
347823
468604
78701
101414
197499
490871
89273
380343
441974
35974
486114
354398
535536
294030
7276
278742
137028
98721
372764
429802
72105
220307
116845
195406
333000
130401
264382
125458
363036
286994
531070
113801
4108
47603
130118
573924
302990
237566
21470
577926
139436
425925
36844
63602
399791
35894
347228
225617
504813
245320
466007
553931
166731
164885
19090
457262
247806
502895
167593
352491
520
26386
497348
352000
386164
32901
730
30925
333167
150361
231747
462244
504958
260738
313762
346645
486118
202998
541613
183884
230245
83172
126638
51844
421673
118625
377723
229427
371326
104345
361687
114246
397354
104137
120850
260516
389168
234555
26348
78522
409784
303024
377949
69887
546983
113736
298197
476810
137315
376321
410337
492905
119785
158167
185930
354061
106563
328452
506587
536517
480173
570688
376441
252127
247720
132554
41923
400317
170041
151938
198650
6437
49091
221820
455966
309859
300659
15850
388014
253386
65415
238228
548882
302155
93483
371869
397287
315249
360564
448410
21382
477474
144862
517515
230190
322353
231568
14940
132719
498942
182469
113720
168890
94852
246077
117535
52596
419116
522020
255338
125228
564332
106375
249534
220915
177758
293057
222430
196878
554980
375606
173081
84936
418907
562229
457616
125700
66038
239274
574110
305540
98431
167347
53345
438481
286010
5569
343606
168898
191301
236338
291394
715
520237
236954
192212
524002
471625
476029
413124
203455
483328
476417
114389
372428
369221
322654
388157
561314
264540
418680
359540
426182
521613
92248
74478
398905
554273
125909
430583
418959
503522
382999
403145
536375
352618
108193
279696
163253
439007
204536
552186
269926
372147
399921
201418
240565
471483
91619
393971
331648
385856
567440
81922
391722
372894
535997
134096
545958
239943
186929
34222
177714
277812
197111
281878
532003
557172
142890
196116
385454
322845
374987
123137
255112
111207
304819
523526
336046
42893
241273
240049
90659
271364
408008
253282
167067
354278
178317
229653
93333
163666
566920
495199
100329
218119
558864
257382
406152
206587
420339
325919
278853
555763
293200
151000
209664
79380
197177
353953
464522
392260
46144
154202
164366
206025
511236
24921
497907
393226
318138
364125
157321
492395
187857
109939
441500
144251
368581
51403
283498
43555
89356
404601
23272
425762
460682
544629
209829
322029
199247
307262
571242
124236
162393
104829
250766
563938
237399
131516
483001
21994
97958
540187
264497
384808
343187
51277
6712
566103
435384
292082
359039
165157
267972
263796
489313
392722
541924
554433
571034
146112
201934
518716
64116
294992
289586
159970
479617
269006
140465
513260
554805
6579
452696
34445
548296
372983
509656
199339
130030
128372
449454
139306
247914
99024
499134
536653
468917
412813
404338
215303
455414
413497
574988
397117
188631
378701
241867
143129
419884
412749
496954
317732
16977
398309
162363
147576
100016
209018
92660
173302
525732
449198
99734
12733
172946
168032
210988
340697
4795
534887
483553
278323
178175
190095
357542
230432
227460
334609
562121
378126
555357
325666
451859
526837
531710
297249
294839
499785
254976
527220
173057
11760
163012
215998
114420
57812
563712
513887
201859
36333
291990
338375
460621
518889
337502
133050
80172
537007
295270
335644
227852
336044
204137
82259
165675
295713
343937
442567
356002
346932
62985
180925
525381
13081
377406
159774
462643
359105
185821
390201
84168
128059
80340
481159
491902
306619
353807
390569
541562
292616
64621
439224
96288
449798
160927
496324
90778
126145
97230
572767
11570
539075
350988
3779
208135
551315
216449
169606
502
67765
281414
118594
146127
543985
124927
471394
385508
373783
501315
140974
42757
527054
202387
513056
329931
153973
510152
520812
534601
131282
386638
508538
234779
229329
396568
153568
229478
153574
356299
436694
324139
299409
212462
478155
393266
117836
190760
213605
196
444382
445211
363845
433277
521141
464786
169076
301402
4495
177258
328962
183757
452966
416059
113233
559417
280678
481398
328372
234910
30667
343062
383046
370953
258089
404229
456931
535183
300867
60507
262672
7288
81100
575395
539951
347848
437594
352005
14941
196453
528386
466939
482187
293468
494077
217285
362951
435751
411480
517315
480015
60610
353001
376442
430265
478338
303069
525344
437331
389315
8179
31981
313872
330920
515465
258905
142249
323128
389699
565012
124636
488693
376608
309424
370596
261940
39871
226984
152866
515050
116861
412876
120411
550452
565273
273791
181466
183155
293505
336113
569997
303738
331049
147030
74058
198176
23991
198841
79816
85183
261535
566756
386291
318200
569849
57429
36049
420827
519271
24391
172087
158795
133002
522198
133698
499365
79261
258860
457718
179948
421875
558073
206684
529762
456756
65773
425722
53102
294264
416730
38574
176275
404297
127494
242060
272212
189244
510861
421370
208516
206431
248457
39502
375087
130839
308730
572453
263474
544611
255708
412604
390094
578131
234463
493563
9450
381914
148999
32300
423576
569758
347253
92939
112212
13923
39472
363736
289659
269949
88349
188522
488915
129054
573823
316000
440562
408818
539302
199575
122300
340047
322816
472878
313922
228071
265648
400166
169166
10040
125245
148766
31281
172599
431067
208236
441824
175611
15148
431199
521587
50025
443139
349822
515056
27530
571970
82367
7115
424333
157601
537506
447187
115182
547597
5586
143040
31650
196336
279818
206273
403104
514248
243190
558642
548246
16848
391539
89614
284589
191314
259452
208380
209441
465463
385005
321385
223569
11727
87574
566470
210890
323598
427193
425676
401240
94021
259571
447553
456053
84693
14278
119995
234595
408696
136271
143560
357578
28071
36561
157102
293789
392251
356622
180274
48320
475779
301326
100977
413551
574010
404479
80725
552221
575441
197424
124601
215633
359546
25386
73199
334466
156572
124614
34121
460049
327623
441695
292488
476514
464018
348571
113413
125208
129690
446218
493761
383413
460390
343149
374041
525211
451263
333683
385194
107427
102872
517249
475879
575755
147787
297180
343774
112437
142240
384503
511111
51089
145408
143582
408138
162858
71850
126925
222781
314616
425609
203928
337563
223300
52644
272566
232597
374430
469075
267164
265851
28134
308889
465795
47263
233727
42
493117
124621
533378
361259
458750
429033
383289
490927
520964
174420
64425
378859
401850
281475
46508
205300
280736
110961
230679
151956
321497
73665
488736
165353
365983
556230
21465
581226
448861
3793
347335
150726
75319
2521
285894
133876
104589
346013
63516
83656
491515
326256
49942
28508
475413
270222
235839
48554
327777
111179
507171
425973
449490
205239
82375
459575
432300
91885
340922
270239
195894
121417
344831
439651
232148
391688
480793
534275
260823
469294
8688
255654
191300
383464
81594
21240
478077
517596
555953
294119
402234
459500
564280
106849
167501
98328
267411
145512
272599
50054
414156
161129
418226
11796
502090
390350
440500
240727
104406
163682
437910
143767
358901
527631
500543
28377
231097
227985
556703
421566
73201
478393
280347
15497
131969
515760
295440
462527
42147
120007
212895
425361
454143
5758
366782
213932
229848
458861
132791
476664
150365
343038
529649
180515
499810
329041
15660
419228
396295
502644
321085
245049
34193
217323
446455
528046
375573
15802
147448
407291
84000
280891
150487
510606
163025
249964
126123
233771
118507
97278
357386
23121
10580
2153
176017
371472
373289
173908
296797
334083
301107
577522
125404
278359
575032
273002
266371
108315
255633
503490
250051
143927
117407
198271
447043
329789
399991
458388
87489
228411
494634
260802
454161
446322
231079
438373
395665
244539
212427
356660
347276
183287
498374
21167
544522
418533
288493
245660
406103
406976
367313
455555
117337
384465
185697
160393
463825
276852
181462
176288
452816
102497
54277
225791
361046
197278
9857
227736
398992
55868
170914
181677
467803
560470
264599
540372
559442
201207
137227
267643
355471
245431
555669
344498
84783
193474
102411
401860
119469
448786
449990
568082
340472
307573
231828
307547
82052
15140
493612
503972
386592
473219
495557
159440
355869
311531
209733
240119
415048
296098
249482
15663
151432
263011
488539
463913
502798
174276
495613
407861
229304
146742
545039
161202
295134
162144
453317
52759
335201
222903
20333
559550
336049
346140
491223
306611
102746
455355
449921
477288
77821
289712
452663
147758
129571
490869
345961
94501
160394
432993
178796
372494
316323
383435
194940
74583
148911
518027
431827
32724
158548
227227
500330
54679
321024
471175
252074
476569
573258
337247
294373
558661
148898
563267
163112
411968
193565
455210
349344
337160
160456
255158
553678
123843
549687
381968
579471
100604
379841
357526
197263
14756
412639
210915
47204
539251
166255
490199
260363
91654
170550
187888
97362
285418
176993
292741
361901
296988
223496
493753
114907
151358
316534
472509
499802
348519
347747
58851
104790
396779
130528
2255
19624
526800
233950
505945
131207
290750
114090
196665
8708
134688
394715
115088
492196
530099
518729
291572
421457
445365
78929
415461
551796
210002
207913
344878
303893
149196
353275
122413
553361
519132
467135
431439
17089
322119
228214
35062
105689
366141
285651
60409
472671
401446
492846
21023
421952
374100
265200
506628
62298
243626
212122
350648
409921
428140
399212
388267
198921
429246
202040
570001
261346
61171
131815
455448
82696
554607
102174
386803
188421
191846
209898
380117
321064
119617
188651
132210
244299
174072
542910
378334
118405
543347
183657
581180
395289
64760
265584
29573
493720
94795
315601
416596
260106
244019
463884
579468
112085
300972
238528
382542
57672
165298
46889
289497
337180
481252
7913
432150
288161
403758
257336
565331
346589
270785
205670
231580
508580
98871
239997
554579
160057
404922
78771
380756
171199
148077
22892
145378
26967
235200
176007
90349
554377
189744
257053
270515
66508
113890
291983
558927
420916
140908
58384
438226
575776
106935
40602
468993
494810
210408
365685
483722
39430
258793
272615
51476
189919
443887
391648
422670
445135
198959
405529
459757
465489
81827
262576
408289
309237
76249
460091
512630
45959
280320
200492
404652
48475
18480
457097
65889
162256
265950
520752
299082
51500
499313
104906
35438
167647
7274
387824
242139
173166
399830
12014
510642
154053
67785
78170
514118
87998
52703
203539
534533
85926
274438
401653
458790
509262
144481
387515
246649
503207
235131
501531
62025
43286
272323
326128
561889
167529
171067
50778
301282
469719
509388
480317
379055
546428
192763
445602
420882
232790
174332
232865
292822
511145
119502
312591
110330
281353
116244
58778
428079
64902
520840
232054
473214
572574
296684
351590
217997
178761
71618
226496
285212
381195
499903
232849
468997
345559
503097
578570
396404
405223
578752
403500
188958
504498
491623
462929
525762
395550
574227
240751
169356
524694
40886
571635
487774
86220
95677
268987
502599
155270
103855
125100
241355
220214
391774
110618
154587
134483
458781
360877
465963
194595
346934
127153
188078
553869
102665
400547
33759
42779
397587
140295
151807
549136
470288
89738
328368
546934
164255
563683
399988
360951
217303
326781
546133
135399
94666
330037
569839
411070
497466
404805
417854
318442
255036
457230
346863
307438
370448
5124
152582
38118
12179
58462
308420
329456
74920
250368
186428
556073
111806
361244
80273
230964
156754
503101
75173
389404
195538
88848
286018
245481
140929
533721
268378
70048
315467
46269
372807
192403
387328
163033
481314
65306
192529
321107
112232
441216
412399
565391
220670
61471
463290
346707
67587
147624
13031
396754
278601
439426
42834
281829
376209
353148
556562
97579
217989
319530
82551
235319
431799
53892
52853
54533
88897
225093
386777
546742
273684
413900
245447
577995
16249
188414
485142
199602
89258
109679
502397
14494
13632
51674
244999
305050
455956
426795
560700
327306
410301
343803
539422
156740
527845
100582
9941
466585
61515
231895
157052
41271
148128
141172
320232
78565
539883
391300
365182
322194
116517
323496
473783
519874
440706
361587
265153
329946
342814
32258
153510
194555
309317
245006
300303
97767
218224
370170
290477
207178
456730
209480
513775
199516
581542
32524
416337
96241
506279
422893
248911
509855
355183
201220
234914
333436
68198
429074
328430
160531
467854
280688
140661
349525
267315
565543
313162
25751
232574
560358
505213
494427
160308
287335
99182
413260
558808
290839
122954
229221
192007
243189
117645
552824
366111
102056
356949
566298
97899
422545
343769
13127
179273
104486
37660
304099
517570
20207
36484
36492
155974
107257
534019
522371
222825
96183
509227
302260
95078
280918
367582
317033
347982
73209
290521
187243
425151
483723
573796
187249
144114
132992
35887
546067
426532
45626
461805
129989
541478
485489
578498
485483
144784
248224
372362
92050
423519
473118
177207
105455
276434
157767
384335
509497
338191
224010
327388
96988
43376
67867
320743
555197
104453
14439
512194
396387
252559
108953
461262
66320
97946
238065
306139
572408
577864
81004
464526
89378
193389
259049
85665
381134
412419
308947
557510
502084
288290
254609
188752
439525
13980
140513
240173
305268
38678
394050
402926
364079
159260
293034
55429
289640
291028
211120
48050
93887
361029
486026
388374
207803
540174
530630
430359
36420
120099
199764
492911
84498
200882
139843
4975
421209
259513
520324
211317
236457
419344
3867
287846
50434
26624
507235
16238
103705
497555
440060
175825
245460
308276
178535
391735
206391
201550
400945
194634
262360
554142
407574
225225
246057
498627
486172
226571
461751
459733
345869
503841
286460
45644
22861
285599
580284
569565
286778
150024
542101
484075
538153
20470
128034
544120
357109
450728
550968
326230
558809
76334
555387
47121
523978
11081
378134
116279
364884
488250
551957
322824
545564
255573
286327
355453
361933
434897
32597
226761
166482
557564
208166
232115
283520
137395
555894
103509
174284
458313
316147
344059
370701
548930
89894
373662
572095
19324
574411
45746
480122
63950
92339
201111
157053
401539
427956
339099
274651
159537
556101
323399
564337
514915
556025
66427
322357
173737
369128
420230
45176
509675
374677
272311
109797
384723
383678
453040
91080
301634
533003
40361
221605
216228
104002
161011
146123
214421
496252
264948
9759
138856
316189
145734
50411
325157
259099
516856
529668
135976
467130
367433
385598
520933
102805
30066
436696
216837
380754
350457
126974
565374
73832
214703
110501
380609
135872
140231
251816
133836
398866
230362
426815
2240
51484
546325
224093
221190
525024
238806
99908
165795
109146
537727
496571
183803
211175
433845
168692
526394
368402
256309
468972
139169
398440
171678
547341
64332
533589
483249
406000
330348
439188
572886
252829
242724
139127
404568
45809
52257
458727
334509
559665
60992
290896
503106
27972
536891
410855
31202
457882
403315
87399
395291
322141
226377
202799
420826
553034
212077
97693
266370
101656
504142
342933
87567
342060
268854
437028
20175
198625
405047
382374
338291
403975
527906
322429
545550
140043
107389
74059
315621
110138
78381
295576
494438
106335
472349
15818
162358
366484
44604
66524
118606
366873
270721
556478
350789
298628
163314
262800
459428
491725
285421
406332
498280
34535
524282
315744
226592
218294
459141
242034
114164
293733
248242
452881
441496
54358
177489
372861
349489
483941
572802
356494
193875
146570
58253
21338
6220
341933
533368
1818
428248
293026
227656
193021
326938
512966
226020
343059
249720
540106
375278
300023
126512
517135
472540
361439
132702
503294
109537
540669
332007
245266
313999
10386
225715
311567
103837
302405
248616
102654
155087
124756
379659
569272
160166
428234
422280
174425
133412
174503
216581
345063
52949
69536
216161
272728
200870
120792
193480
493923
445567
558539
51938
422706
416271
244160
437898
327352
305480
349459
522418
485219
225133
361400
546569
190015
348216
421822
457683
178683
40894
234526
465074
518725
168096
210190
139605
35195
463640
286770
141651
112022
532552
325327
227224
17272
84163
331475
126065
289309
8583
52952
189427
579693
437947
187565
215982
356424
453731
463522
372316
251797
70187
280515
556608
341635
391067
469480
476298
57917
146672
122747
394328
12209
80013
573291
278449
129659
579560
557190
227468
334782
51157
23774
9426
86582
39211
275751
131597
51250
357255
9041
346482
9647
157019
409016
273416
114414
298172
388854
275025
58079
518034
503518
146710
120632
474680
303713
259097
479630
208318
437298
173704
361831
371638
344279
230175
72507
417980
72621
163057
92894
543525
577364
263696
472732
66027
391584
197745
131019
65604
91318
535934
212646
576354
482071
160556
120129
7260
344881
447548
318193
30383
527002
34904
35677
526222
105261
401897
399452
25660
524595
384512
117543
514600
268944
112664
222340
569058
495332
192153
75591
286711
174888
577065
25508
169972
401820
425475
290700
173091
559101
122418
244124
198645
325519
276437
528276
146614
45574
417804
326420
250594
27353
310407
370103
274957
561160
167598
397166
257458
404546
148392
373396
62230
493522
563665
274240
269815
79024
527427
84674
486788
267690
443347
149304
412285
207041
412916
10764
151338
299000
17882
475510
398188
558213
70493
180779
347210
280211
58146
379022
504125
537604
464858
329573
568623
228309
454444
552775
557884
435671
168706
142257
571437
574845
387773
321008
574208
405811
375426
321887
256852
433554
517029
125870
80395
497139
490008
405279
571857
225738
514913
456239
499402
96440
487607
370999
319617
370233
60760
352703
478575
84170
134112
77689
185036
73738
547502
104782
213276
136908
436273
442149
355000
374061
249884
105711
136464
146997
76351
388487
99115
124135
24721
132931
1149
182403
386089
81691
480657
441522
60989
268000
55840
514321
577959
359638
457986
533596
60332
367082
772
535842
473541
270677
409009
259216
302318
117036
331372
231125
384486
405214
20760
579760
172995
359110
83110
410068
109916
328757
299261
19028
515660
40757
10256
442695
553097
185903
74388
425120
241326
299609
29397
328728
283881
344029
367336
27075
163628
127263
488979
460147
473050
405762
221547
131581
561187
406489
140696
452721
530466
118965
398803
218365
298738
19441
521550
120157
498687
4754
365866
70865
235156
133386
142742
221183
262391
567053
520982
121349
448779
440354
3983
578993
519691
160703
103307
300408
137106
488377
523660
318022
132578
302520
153040
408817
145227
311190
159662
202923
256775
359864
384848
336404
185303
421703
362682
464622
246590
422729
165500
42563
219216
520232
95063
265547
532686
290558
112591
448211
315281
545475
225850
232460
82740
272880
347254
122047
352151
541486
97249
200252
544782
499571
379014
303534
479909
305464
323682
181524
273855
190783
567801
119752
241503
536429
327323
128756
349868
500495
372260
315824
484986
364993
124759
300124
329319
68628
14549
121897
506595
115709
199610
230150
31717
139549
222332
534161
360393
541664
507167
286523
158660
66926
195750
80022
589
252220
47255
247014
49881
455005
232453
445722
516805
544122
541917
469356
370042
130522
502163
307866
408894
524247
52233
177861
348881
357943
295303
475389
431691
61316
143998
503483
340155
488785
133636
133567
251627
470095
34873
88815
261178
468612
127477
157960
15687
303089
572331
456708
190515
126131
239194
332074
129765
107167
478184
421833
359715
112440
331317
74492
505386
247839
534210
134503
422700
352111
98674
546219
520508
503008
461953
101913
362092
22103
359128
316666
335579
414750
297980
365652
53635
547601
97589
570515
7125
99828
321437
80671
426275
294883
212605
424293
338108
25005
6949
234291
428399
7149
343076
575287
431848
307611
293909
542511
564739
573843
356878
472864
336793
121904
161060
254004
269873
216428
77172
346517
498555
203690
348973
117704
552672
275270
208107
314016
427518
278134
53420
318777
238980
350614
467315
61233
272188
550797
125051
553965
187286
282912
102532
156076
467848
130875
531585
523470
507684
332582
438989
489209
125944
127474
371957
570349
283286
541635
547106
253630
388677
572525
542302
554537
367205
228300
443498
356432
123946
490441
211063
224542
116574
434510
33116
353136
134167
128291
542510
433963
147453
365766
374806
336600
38238
165476
535578
127788
157099
173640
114348
496722
58141
467296
235864
5154
22775
422536
136820
453438
446359
41990
422240
39267
391392
233825
308504
478250
87328
4079
127074
267709
377635
353231
185768
487897
124215
249757
341681
557552
280733
374734
281601
456420
222266
491947
432732
467157
94025
410328
428291
397639
163528
234697
557573
208363
515962
358658
373075
438995
425672
450169
216103
254638
288591
53626
43417
372252
5038
218357
120860
399349
485509
530261
477087
352302
96075
495443
133928
197175
134074
212553
448181
152000
254277
105734
75481
343662
479350
554347
71090
297426
22176
277622
469235
163041
221272
154263
89296
68411
192871
183217
258141
53058
540529
566414
560948
254535
246076
135972
420069
431023
343643
32682
515176
222635
377155
547041
513283
26017
366096
252133
138078
25685
321798
549361
14088
423048
570810
374974
447501
492544
554046
575357
420791
6019
340451
66800
565575
148055
330432
483038
455004
288765
11034
86988
347142
450559
543581
293757
556901
533032
333020
260266
22420
13948
512657
214124
231236
177149
560879
491793
35767
312878
118542
450596
423773
48653
224523
509577
462677
75405
350023
452122
42008
302555
382309
468483
368684
372580
31333
153697
124876
330023
315672
53990
136533
82815
356836
414821
268717
7333
77544
525373
371042
227048
576327
419309
239773
8119
424135
297425
222711
489909
393995
31019
539326
517612
102461
199989
483374
44952
103863
528980
441543
85381
247234
50924
483994
87456
424271
356091
534669
378831
560662
298773
257896
498274
305800
40517
183949
276840
84442
297620
298252
119088
233315
283977
345154
287649
427311
63399
4700
463611
224104
209388
431655
364190
28864
412455
283290
228541
422200
985
133596
323853
503081
130732
224675
199688
230862
21396
485390
1532
125778
235541
370478
522478
514292
384338
531707
178746
532747
62915
519491
140691
112093
358024
263687
297595
506085
102446
325768
29558
222054
466965
316254
546500
216785
194184
464390
348371
231582
208995
464339
308856
340946
214604
570586
182227
248441
89078
376310
73450
115924
308235
15994
8749
429679
37751
122040
284286
388707
248163
11320
427997
282062
237600
376751
223314
86215
12443
163255
564940
462640
522713
306303
460675
126833
26201
224757
357899
546782
96427
480944
479556
569273
520528
190690
344832
462466
270354
559776
279259
280909
227781
163798
491098
439658
416088
107375
74132
379800
511654
346687
226161
578849
544272
146149
570624
178299
126671
356380
530766
175954
158798
422095
55780
512276
560626
187329
513125
347216
306486
161840
180917
188192
421437
93120
324891
252216
488476
578347
101959
10693
170038
213586
210439
469202
381463
343248
127785
287328
538690
16382
293022
112378
435785
56092
381504
284365
406129
233119
53629
188509
191053
81056
82252
538319
38439
181948
439710
529344
434035
342958
563882
37734
364743
330986
546226
463211
62210
442724
232241
293858
119345
61953
577033
522015
381587
350107
4936
511307
228771
177811
231450
176168
84540
259408
264238
539738
255827
459382
221105
431742
204337
227741
336356
37655
167159
59352
165937
53956
378712
88462
495786
542938
566498
367228
157577
442661
62363
390689
480664
521540
414249
20571
160855
451683
156832
570045
326542
568276
568717
563311
113579
218268
546095
160661
341118
150649
462632
198972
220025
61720
430681
524011
457217
40064
285583
314493
78023
470882
298722
555597
489829
314779
367818
138503
243737
580255
444565
386677
190841
493074
234347
466988
227033
519039
351554
390585
443303
140983
81079
538005
169757
368780
457322
341804
409116
181805
284292
551358
344548
503569
336587
417055
522315
58705
148955
375530
474934
577893
28881
360772
445267
244737
355777
72811
190788
54513
243075
518551
487530
292169
69293
397303
129285
429996
109532
53802
340573
91280
535602
270908
381925
549220
488573
47131
32735
117525
279085
43961
188906
394677
395
185201
189365
127596
32712
504810
3703
182874
146981
306755
453093
520503
169808
225670
91063
348584
461802
572555
185922
131497
46736
536006
256505
214975
13445
350736
98115
50304
361180
511333
564820
429717
222500
40083
538230
349438
371250
528578
240418
302380
261758
535809
308388
578878
509451
46919
562592
499950
90374
318146
195353
355325
314515
237277
203024
238911
32039
145591
16030
135411
229350
421757
48034
183704
307292
97974
275999
448256
451915
119113
143503
494141
50124
306553
35526
255279
560908
247264
367599
192782
511324
574350
67569
204360
111907
2839
513971
245201
185240
339468
540101
539673
194425
22168
520150
301595
96006
68286
131280
356662
182441
284749
107108
49761
386718
55244
187990
248678
147721
425727
360350
310797
76765
400489
247639
279864
44699
356145
69138
445041
560598
165464
536343
7818
322831
334760
451463
348730
285967
286353
201887
166165
359
465591
519359
550444
402711
3661
132706
534983
306281
150317
15978
580029
496090
267127
210980
384015
222559
2235
255649
278168
440840
27326
202562
230268
362712
1573
107661
464515
373132
447242
547440
43613
200143
260883
250901
64693
408480
204757
319933
147471
381332
518197
27656
260257
434580
159203
568630
497441
499597
60179
574804
343254
501762
220704
524536
86946
456046
62937
49633
144305
475593
478553
574145
63648
3794
303177
1340
82835
371427
156747
448694
219567
75095
242615
492077
132776
199125
349622
195754
455548
181873
138185
338044
362797
180953
505826
69773
304834
162580
154090
519853
319687
132328
27969
52166
100547
568131
415218
348045
478159
402869
10211
26547
551692
105432
313340
182348
383419
570947
345353
226883
255784
214199
262262
283261
449708
299970
392391
245997
330410
343571
519542
37470
42144
342521
498537
10935
443860
512648
146099
98599
123932
489861
262895
184700
218587
363581
21001
481404
249356
64240
492349
199236
481064
353405
116479
132024
138768
524665
434511
326970
138784
340368
312081
366615
171942
21232
473850
93686
295574
51054
162692
174091
20070
270066
492816
20904
484500
147140
242972
420081
63563
261712
316396
49413
520787
510955
393840
142487
19817
261180
413736
230619
484614
337011
496575
4338
552545
5601
75426
568863
184227
170629
438567
505132
541353
284674
322567
182423
312051
18896
40471
321725
188850
37119
95569
187362
397133
528972
487131
174989
370325
223554
385633
103485
537574
63240
256566
86467
401092
486968
308441
280017
527464
131965
310479
125556
220160
532963
310052
107963
293841
388534
45603
368949
391825
5107
569705
231549
250108
152933
206433
358817
434006
283904
152808
539975
24629
410231
13465
502318
51961
445594
209062
38726
295420
430079
240147
561512
35795
102589
505619
565469
271772
520561
372300
178807
492805
1083
303704
125635
217521
278032
208688
335325
140435
313990
143822
320857
549230
76844
424219
463876
243199
2988
215170
30012
377738
408568
490624
404839
138316
157206
404461
122934
263346
21327
99913
67975
339676
391891
365305
337055
233834
125524
46869
32577
304744
104176
167356
210404
307989
217223
196046
454414
16356
244487
543660
197461
199681
476787
455085
307074
260547
107468
334769
29437
166837
53838
502979
82678
288860
535523
311950
237723
98656
223123
273930
58057
544334
324857
198043
535326
316505
12991
576820
43611
107839
275749
456695
78188
375786
466239
184830
537128
434513
244344
374576
69140
434247
555009
510857
220819
20598
99416
74967
533129
515577
213361
330974
548848
431557
503278
130043
402570
320554
559884
252629
364596
423484
271230
105552
143143
285751
49994
204162
80646
381393
123415
118417
30932
425412
388130
551243
468337
484893
25014
174390
463781
124647
60823
361964
425702
575110
532390
230881
84592
189997
221307
361472
32364
71918
316365
492378
234251
48504
418070
89884
562045
506552
66360
122962
262605
529939
345229
294853
344397
56091
8599
459823
175785
226128
259983
354515
379144
384995
205253
116786
441432
448810
83452
465129
506906
90616
551959
406404
157891
362090
439630
45099
61960
478430
489605
127050
579872
475798
64510
447733
33066
102848
538819
323760
200401
179765
251317
239376
83836
578092
522452
393056
278848
27787
377239
473427
83065
377005
576539
248019
473370
536369
92648
332461
437609
274800
388846
323048
193407
541898
480140
46526
26432
339738
325991
37705
528033
542922
313420
190463
531000
454907
26448
238199
476652
457147
364256
72632
430380
315448
353320
18158
91527
454252
546987
386370
38064
19763
64152
453216
55223
361860
522566
509531
438432
31164
163290
389197
333440
173464
447842
381615
99961
156126
103134
394940
165638
261706
378311
534081
373848
401642
338019
378096
289610
547421
174672
133343
191360
293751
520892
145214
167668
37456
460962
465267
292804
347529
203661
10766
27371
203845
155736
136715
463588
26640
547612
131453
184274
442456
265085
223256
129420
23019
536467
194532
127585
392637
330408
524775
31993
433924
502852
553129
559364
297343
71360
225537
271148
345499
475893
237463
5278
501243
413235
444236
541071
380088
468063
94858
225913
295614
210276
170975
205570
422375
550365
308702
484627
565031
98979
480345
579548
272673
436875
287874
16502
274917
281809
442968
289263
347766
160933
84533
266409
122199
396200
30958
504541
1591
89432
387150
306383
15260
154515
50752
166913
102644
100196
160278
349579
442536
17923
310564
62020
152004
578330
126299
527025
83494
226400
268435
445334
310391
505156
19157
44677
318171
447765
354369
527486
329939
184771
134856
467675
517133
89697
447080
70685
144938
519673
485758
454957
564851
189451
408757
192616
280734
305060
243946
99179
303971
170519
48917
549965
300245
384101
576607
186709
516341
241668
133470
134811
500825
464689
29833
343820
213429
387434
279305
444207
210777
372043
189868
572229
8495
370090
450282
277080
199158
109612
567708
245659
485129
268363
23448
5352
235597
6871
348720
94113
314613
63729
114458
215394
460460
240387
398726
135604
571728
415770
286908
138151
146272
344094
345209
241187
282768
113037
545583
219283
145873
285957
489235
157271
197458
502671
499845
334884
79084
505573
115618
561491
354202
279838
190734
134738
269450
482784
144610
52774
290659
440646
25807
442952
159215
318224
73445
211653
527960
401862
431026
488755
292278
400554
272630
382668
470298
166426
129645
28820
161227
417696
560677
283216
28978
310302
154419
230450
328289
73118
104691
15085
405574
510548
470005
102928
569249
413126
77282
96732
359020
42182
250875
106206
354929
320796
453341
237318
254834
137265
399865
292685
152252
319579
81484
16599
162257
351034
396051
502275
308278
34483
13333
320290
321579
349794
99219
200162
369470
487583
62703
251639
138246
157170
477112
283963
74860
307057
364075
295491
34757
400161
170194
120874
492817
3817
183973
135436
512989
114744
379210
201072
293785
578385
237420
7888
18224
155317
522406
441440
110482
173400
183348
552504
475660
166948
147025
443259
578792
245227
546687
474519
393284
249668
87493
151651
100306
540466
546556
212675
282942
21310
385535
7304
303409
386116
574297
514550
217133
533553
447152
578703
45392
166205
180154
25143
338802
330110
261389
343506
442726
285388
554934
421316
479912
85192
34874
487266
226173
20748
360660
574509
543364
1554
125539
566931
312889
466945
444804
257187
568587
427160
71123
563849
138589
162841
129663
107226
140686
321663
437117
179808
321718
62398
16497
468933
219841
355430
293554
293044
109516
485887
490620
579893
427135
31636
217919
432441
314396
119802
393682
201764
146193
116358
84825
208311
419774
177468
72052
142585
519598
464006
556083
412136
169361
442929
84567
549932
75560
74656
93314
393838
383018
372433
431281
556278
5513
108503
500478
148588
138713
368153
22646
303778
270758
276706
275429
492025
169111
494328
35891
70258
400528
165229
460494
269311
307658
98283
369294
319345
414578
541550
425388
129855
99477
383073
387906
293124
155873
549224
266021
52869
1584
421902
498535
277235
153013
452013
553561
138040
20820
58483
423506
569001
325153
383039
213421
38825
453283
384661
127702
238147
104893
577826
64974
240655
459153
145665
49810
65008
545385
125070
46433
143329
429174
52947
321314
253341
157365
453162
111910
339019
239575
362219
80652
247317
460286
365724
160875
372220
483389
572181
146190
580975
54761
348488
416104
468778
18833
251537
234366
510078
14723
338595
153797
513098
467138
404618
261982
545730
135846
108244
562557
180524
227370
341856
131743
255691
497878
68878
430640
441473
347664
214369
347018
225238
421762
317024
6180
172004
303101
22488
193494
199346
409627
315350
263463
190722
523292
363902
573778
437290
389812
517082
145073
37907
489763
456261
270386
508917
566823
543897
362482
130966
66632
181962
274613
135708
549746
323766
366714
353295
318813
153307
213693
293378
149446
199927
580543
331727
238488
472833
308645
424225
228746
110435
495377
240646
274491
130921
140006
4688
115241
76962
66650
47718
224991
434187
272048
11169
158222
154000
507436
443499
109937
309692
534018
22797
163339
168683
210098
246069
137954
143320
262587
414795
226938
536831
128791
459590
50514
30067
317479
378655
229968
522702
11122
515266
136600
224509
149912
97656
120747
349480
155199
528731
523807
168544
325664
229981
434410
431208
508996
63791
89225
513690
136740
224364
515424
508302
418175
465552
439907
272097
451087
396304
342273
52507
300066
380089
326248
167906
37846
262993
60090
499249
90432
74456
264660
325598
480985
245411
425644
224724
475439
246478
487438
563731
441854
522665
245915
85747
315162
108761
407521
388528
389453
298331
447791
368820
440034
305677
122208
182369
543531
151820
63650
457580
563381
320899
14869
137260
61925
376307
80367
269089
203705
274835
267321
418106
471273
74037
227855
519758
89045
321217
324203
479129
503431
368528
527718
278579
13525
291582
301837
31667
68120
14007
114158
124262
33626
53949
187585
192247
208844
212766
318671
575012
439339
364073
419624
178078
427783
302159
339368
190680
23807
288579
312720
15778
553558
571834
574376
122161
493815
472376
483432
149123
51628
264628
26609
23696
485081
441323
451679
42055
378795
86439
366493
520996
332869
18014
554523
83476
6040
421834
424392
308160
335233
249809
349098
358090
187349
61782
35498
386514
207108
578418
84447
104108
126107
211674
111909
490708
477025
206757
556205
142484
454296
464366
358254
215482
468548
82680
100909
405432
85764
94651
63973
8131
288592
257470
47597
321557
34520
134066
246701
317797
282365
78176
29577
311075
331937
190395
5802
245112
111032
140556
199127
376491
305253
300375
545903
357782
377911
74963
329336
25057
3244
252020
293474
171050
239306
189772
238090
160031
36761
445675
252716
152214
239466
55155
479829
420281
445812
118106
434576
451104
316708
438535
300322
167952
390072
487220
20247
9400
43944
35770
487351
425462
212203
9668
8981
574241
332096
535563
192944
498733
276151
550645
507037
9769
404249
236747
376416
306415
45966
191296
576875
493932
225075
536444
79920
561681
60700
99874
219437
509819
466665
579326
428739
394611
263083
379554
279391
178516
133690
77396
300137
6861
435359
314108
444152
500139
92749
89188
300233
414201
443204
211097
|
TensorFlow2/Segmentation/MaskRCNN/scripts | scripts | benchmark_inference | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies running evaluation benchmark """
import argparse
import os
import shutil
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 evaluation benchmark'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--batch_size', type=int, required=True,
                        help='Batch size used during evaluation')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' infer'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --eval_samples {200 * flags.batch_size}'
f' --log_warmup_steps 100'
f' --log_every 10'
f' --eval_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# run model
exit(subprocess.call(cmd, shell=True))
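    # Usage sketch (only the flags parsed above are shown; any extra flags are
    # forwarded verbatim to main.py through `remainder`):
    #   python scripts/benchmark_inference.py --batch_size 8 --amp --data_dir /data --weights_dir /weights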
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
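if __name__ == "__main__":
    # Minimal illustrative sketch (sample values only, not produced by this module):
    # format_key turns "batch_size" into "Batch Size" before tabulation.
    sample = [{"batch_size": 8, "throughput": 1205.3}, {"batch_size": 16, "throughput": 1980.7}]
    show_results(format_data(sample))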
|
TensorFlow2/Detection/Efficientdet/dataset | dataset | README | This folder provides tools for converting raw coco/pascal data to tfrecord.
Download and pre-process COCO 2017 training and validation datasets for training efficientdet:
```bash
bash dataset/get_coco.sh
```
|
PyTorch/LanguageModeling/BERT/triton/runner/maintainer | maintainer | container | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
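# Illustrative sketch, not part of the runner: a trivial concrete Container
# showing how subclasses are expected to fill in the abstract interface above.
# A real backend would wrap an actual container runtime instead of printing.
class _EchoContainer(Container):
    def start(self):
        self._container = object()  # stand-in for a real container handle
        print(f"starting container {self.name}")
    def stop(self):
        self._container = None
        print(f"stopping container {self.name}")
    def run(self, command: str) -> Any:
        # echo the command instead of executing it inside a container
        print(f"[{self.name}] {command}")
        return None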
|
TensorFlow2/Detection/Efficientdet/model | model | postprocess | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Postprocessing for anchor-based detection."""
import functools
from typing import List, Tuple
from absl import logging
import tensorflow as tf
from model import nms_np
from utils import model_utils
from model import anchors
T = tf.Tensor # a shortcut for typing check.
CLASS_OFFSET = 1
def to_list(inputs):
if isinstance(inputs, dict):
return [inputs[k] for k in sorted(inputs.keys())]
if isinstance(inputs, list):
return inputs
raise ValueError('Unrecognized inputs : {}'.format(inputs))
def batch_map_fn(map_fn, inputs, *args):
"""Apply map_fn at batch dimension."""
if isinstance(inputs[0], (list, tuple)):
batch_size = len(inputs[0])
else:
batch_size = inputs[0].shape.as_list()[0]
if not batch_size:
# handle dynamic batch size: tf.vectorized_map is faster than tf.map_fn.
return tf.vectorized_map(map_fn, inputs, *args)
outputs = []
for i in range(batch_size):
outputs.append(map_fn([x[i] for x in inputs]))
return [tf.stack(y) for y in zip(*outputs)]
def clip_boxes(boxes: T, image_size: int) -> T:
"""Clip boxes to fit the image size."""
image_size = model_utils.parse_image_size(image_size) * 2
return tf.clip_by_value(boxes, [0], image_size)
def merge_class_box_level_outputs(params, cls_outputs: List[T],
box_outputs: List[T]) -> Tuple[T, T]:
"""Concatenates class and box of all levels into one tensor."""
cls_outputs_all, box_outputs_all = [], []
batch_size = tf.shape(cls_outputs[0])[0]
for level in range(0, params['max_level'] - params['min_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(params, cls_outputs: T,
box_outputs: T) -> Tuple[T, T, T, T]:
"""Pick the topk class and box outputs."""
batch_size = tf.shape(cls_outputs)[0]
num_classes = params['num_classes']
max_nms_inputs = params['nms_configs'].get('max_nms_inputs', 0)
if max_nms_inputs > 0:
# Prune anchors and detections to only keep max_nms_inputs.
    # Due to some issues, top_k is currently slow in graph mode.
logging.info('use max_nms_inputs for pre-nms topk.')
cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_reshape, k=max_nms_inputs, sorted=False)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
box_outputs_topk = tf.gather_nd(
box_outputs, tf.expand_dims(indices, 2), batch_dims=1)
else:
logging.info('use max_reduce for pre-nms topk.')
    # Keep all anchors, but for each anchor, keep only the max probability
    # over all classes.
cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
num_anchors = tf.shape(cls_outputs)[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
box_outputs_topk = box_outputs
return cls_outputs_topk, box_outputs_topk, classes, indices
def pre_nms(params, cls_outputs, box_outputs, topk=True):
"""Detection post processing before nms.
  It takes the multi-level class and box predictions from the network, merges
  them into unified tensors, and computes boxes, scores, and classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors].
topk: if True, select topk before nms (mainly to speed up nms).
Returns:
A tuple of (boxes, scores, classes).
"""
  # get boxes by applying bounding box regression to anchors.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
cls_outputs, box_outputs = merge_class_box_level_outputs(
params, cls_outputs, box_outputs)
if topk:
# select topK purely based on scores before NMS, in order to speed up nms.
cls_outputs, box_outputs, classes, indices = topk_class_boxes(
params, cls_outputs, box_outputs)
anchor_boxes = tf.gather(eval_anchors.boxes, indices)
else:
anchor_boxes = eval_anchors.boxes
classes = None
boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
# convert logits to scores.
scores = tf.math.sigmoid(cls_outputs)
return boxes, scores, classes
def nms(params, boxes: T, scores: T, classes: T,
padded: bool) -> Tuple[T, T, T, T]:
"""Non-maximum suppression.
Args:
params: a dict of parameters.
boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
format is [y_min, x_min, y_max, x_max].
scores: a tensor with shape [N].
classes: a tensor with shape [N].
    padded: a bool value indicating whether the results are padded.
Returns:
A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
denoting the valid length of boxes/scores/classes outputs.
"""
nms_configs = params['nms_configs']
method = nms_configs['method']
max_output_size = nms_configs['max_output_size']
if method == 'hard' or not method:
# hard nms.
sigma = 0.0
iou_thresh = nms_configs['iou_thresh'] or 0.5
score_thresh = nms_configs['score_thresh'] or float('-inf')
elif method == 'gaussian':
sigma = nms_configs['sigma'] or 0.5
iou_thresh = 1.0
score_thresh = nms_configs['score_thresh'] or 0.001
else:
raise ValueError('Inference has invalid nms method {}'.format(method))
  # TF API's sigma is twice the paper's value, so here we divide it by 2:
# https://github.com/tensorflow/tensorflow/issues/40253.
nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=(sigma / 2),
pad_to_max_output_size=padded)
nms_boxes = tf.gather(boxes, nms_top_idx)
nms_classes = tf.cast(
tf.gather(classes, nms_top_idx) + CLASS_OFFSET, tf.float32)
return nms_boxes, nms_scores, nms_classes, nms_valid_lens
def postprocess_combined(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with combined NMS.
Leverage the tf combined NMS. It is fast on TensorRT, but slow on CPU/GPU.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
# Don't filter any outputs because combine_nms need the raw information.
boxes, scores, _ = pre_nms(params, cls_outputs, box_outputs, topk=False)
max_output_size = params['nms_configs']['max_output_size']
score_thresh = params['nms_configs']['score_thresh'] or float('-inf')
nms_boxes, nms_scores, nms_classes, nms_valid_len = (
tf.image.combined_non_max_suppression(
tf.expand_dims(boxes, axis=2),
scores,
max_output_size,
max_output_size,
score_threshold=score_thresh,
clip_boxes=False))
nms_classes += CLASS_OFFSET
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_global(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with global NMS.
A fast but less accurate version of NMS. The idea is to treat the scores for
different classes in a unified way, and perform NMS globally for all classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
def single_batch_fn(element):
return nms(params, element[0], element[1], element[2], True)
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn, [boxes, scores, classes])
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
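# A minimal sketch of the `params` dict consumed by pre_nms/nms/postprocess_*
# above (keys mirror what the functions read; the values shown are illustrative
# placeholders, not recommended settings):
#
#   params = {
#       'min_level': 3, 'max_level': 7, 'num_scales': 3,
#       'aspect_ratios': [1.0, 2.0, 0.5], 'anchor_scale': 4.0,
#       'image_size': 512, 'num_classes': 90, 'data_format': 'channels_last',
#       'nms_configs': {'method': 'gaussian', 'iou_thresh': None,
#                       'score_thresh': None, 'sigma': None,
#                       'max_nms_inputs': 0, 'max_output_size': 100},
#   }
#   boxes, scores, classes, valid_len = postprocess_global(
#       params, cls_outputs, box_outputs, image_scales)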
def per_class_nms(params, boxes, scores, classes, image_scales=None):
"""Per-class nms, a utility for postprocess_per_class.
Args:
params: a dict of parameters.
boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
Box format is [y_min, x_min, y_max, x_max].
scores: A tensor with shape [N, K].
classes: A tensor with shape [N, K].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
def single_batch_fn(element):
"""A mapping function for a single batch."""
boxes_i, scores_i, classes_i = element[0], element[1], element[2]
nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
nms_valid_len_cls = []
for cid in range(params['num_classes']):
indices = tf.where(tf.equal(classes_i, cid))
if indices.shape[0] == 0:
continue
classes_cls = tf.gather_nd(classes_i, indices)
boxes_cls = tf.gather_nd(boxes_i, indices)
scores_cls = tf.gather_nd(scores_i, indices)
nms_boxes, nms_scores, nms_classes, nms_valid_len = nms(
params, boxes_cls, scores_cls, classes_cls, False)
nms_boxes_cls.append(nms_boxes)
nms_scores_cls.append(nms_scores)
nms_classes_cls.append(nms_classes)
nms_valid_len_cls.append(nms_valid_len)
# Pad zeros and select topk.
max_output_size = params['nms_configs'].get('max_output_size', 100)
nms_boxes_cls = tf.pad(
tf.concat(nms_boxes_cls, 0), [[0, max_output_size], [0, 0]])
nms_scores_cls = tf.pad(
tf.concat(nms_scores_cls, 0), [[0, max_output_size]])
nms_classes_cls = tf.pad(
tf.concat(nms_classes_cls, 0), [[0, max_output_size]])
nms_valid_len_cls = tf.stack(nms_valid_len_cls)
_, indices = tf.math.top_k(nms_scores_cls, k=max_output_size, sorted=True)
return tuple((
tf.gather(nms_boxes_cls, indices),
tf.gather(nms_scores_cls, indices),
tf.gather(nms_classes_cls, indices),
tf.minimum(max_output_size, tf.reduce_sum(nms_valid_len_cls))))
# end of single_batch_fn
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn, [boxes, scores, classes])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_per_class(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with per class NMS.
An accurate but relatively slow version of NMS. The idea is to perform NMS for
each class, and then combine them.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
return per_class_nms(params, boxes, scores, classes, image_scales)
def generate_detections(params,
cls_outputs,
box_outputs,
image_scales,
image_ids,
flip=False):
"""A legacy interface for generating [id, x, y, w, h, score, class]."""
_, width = model_utils.parse_image_size(params['image_size'])
original_image_widths = tf.expand_dims(image_scales, -1) * width
if params['nms_configs'].get('pyfunc', True):
    # The numpy-based soft-nms gives better accuracy than the tensorflow
    # builtin; the reason why is unknown.
detections_bs = []
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
for index in range(boxes.shape[0]):
nms_configs = params['nms_configs']
detections = tf.numpy_function(
functools.partial(nms_np.per_class_nms, nms_configs=nms_configs), [
boxes[index],
scores[index],
classes[index],
tf.slice(image_ids, [index], [1]),
tf.slice(image_scales, [index], [1]),
params['num_classes'],
nms_configs['max_output_size'],
], tf.float32)
if flip:
detections = tf.stack([
detections[:, 0],
# the mirrored location of the left edge is the image width
# minus the position of the right edge
original_image_widths[index] - detections[:, 3],
detections[:, 2],
# the mirrored location of the right edge is the image width
# minus the position of the left edge
original_image_widths[index] - detections[:, 1],
detections[:, 4],
detections[:, 5],
detections[:, 6],
], axis=-1)
detections_bs.append(detections)
return tf.stack(detections_bs, axis=0, name='detnections')
nms_boxes_bs, nms_scores_bs, nms_classes_bs, _ = postprocess_per_class(
params, cls_outputs, box_outputs, image_scales)
image_ids_bs = tf.cast(tf.expand_dims(image_ids, -1), nms_scores_bs.dtype)
if flip:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
# the mirrored location of the left edge is the image width
# minus the position of the right edge
original_image_widths - nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 0],
# the mirrored location of the right edge is the image width
# minus the position of the left edge
original_image_widths - nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
else:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 0],
nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
return tf.stack(detections_bs, axis=-1, name='detnections')
def transform_detections(detections):
"""A transforms detections in [id, x1, y1, x2, y2, score, class] form to [id, x, y, w, h, score, class]."""
return tf.stack([
detections[:, :, 0],
detections[:, :, 1],
detections[:, :, 2],
detections[:, :, 3] - detections[:, :, 1],
detections[:, :, 4] - detections[:, :, 2],
detections[:, :, 5],
detections[:, :, 6],
],
axis=-1)
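# Worked example of the transform above (illustrative numbers): a single row
# [image_id, x_min, y_min, x_max, y_max, score, class] such as
# [7., 10., 20., 50., 80., 0.9, 3.] becomes
# [7., 10., 20., 40., 60., 0.9, 3.], i.e. width/height replace the max corners.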
|
PyTorch/Detection/Efficientdet/scripts/waymo | waymo | train_waymo_AMP_8xA100-80G | #!/bin/bash
function get_dataloader_workers {
gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader)
core=$(nproc --all)
workers=$((core/gpus-2))
workers=$((workers>16?16:workers))
echo ${workers}
}
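# Example: on a node with 8 GPUs and 96 CPU cores the function above yields
# (96/8)-2 = 10 dataloader workers per GPU (the value is capped at 16).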
WORKERS=$(get_dataloader_workers)
./distributed_train.sh 8 /workspace/object_detection/datasets/waymo --model efficientdet_d0 -b 8 --amp --lr 0.2 --sync-bn --opt fusedmomentum --warmup-epochs 1 --output /model --worker $WORKERS --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 24 --epochs 24 --save-checkpoint-interval 1 --smoothing 0.0 --waymo --remove-weights class_net box_net anchor --input_size 1536 --num_classes 3 --resume --freeze-layers backbone --waymo-train /workspace/object_detection/datasets/waymo/training/images --waymo-val /workspace/object_detection/datasets/waymo/validation/images --waymo-val-annotation /waymo/validation/annotations/annotations-subset.json --waymo-train-annotation /waymo/training/annotations/annotations.json --initial-checkpoint /checkpoints/model_best.pth.tar |
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/dataset | dataset | traffic | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: data.datasets.create_datasets
config:
source_path: /workspace/datasets/traffic/traffic.csv
dest_path: /workspace/datasets/traffic/
time_ids: 'sensor_day'
train_range:
- 0
- 151
valid_range:
- 144
- 166
test_range:
- 159
- 2000
dataset_stride: 1
scale_per_id: False
encoder_length: 168
example_length: 192
MultiID: False
features:
- name: 'id'
feature_type: 'ID'
feature_embed_type: 'CATEGORICAL'
cardinality: 964
- name: 'hours_from_start'
feature_type: 'TIME'
feature_embed_type: 'CONTINUOUS'
- name: 'values'
feature_type: 'TARGET'
feature_embed_type: 'CONTINUOUS'
scaler:
_target_: sklearn.preprocessing.StandardScaler
- name: 'time_on_day'
feature_type: 'KNOWN'
feature_embed_type: 'CONTINUOUS'
scaler:
_target_: sklearn.preprocessing.StandardScaler
- name: 'day_of_week'
feature_type: 'KNOWN'
feature_embed_type: 'CATEGORICAL'
cardinality: 8
- name: 'hours_from_start'
feature_type: 'KNOWN'
feature_embed_type: 'CONTINUOUS'
scaler:
_target_: sklearn.preprocessing.StandardScaler
- name: 'categorical_id'
feature_type: 'STATIC'
feature_embed_type: 'CATEGORICAL'
cardinality: 964
train_samples: 450000
valid_samples: 50000
binarized: True
time_series_count: 964
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2PrenetPlugin | taco2PrenetPlugin | taco2PrenetKernel | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_PRENETKERNEL_H
#define TT2I_PRENETKERNEL_H
#include "cudaMemory.h"
#include <vector>
namespace nvinfer1
{
namespace plugin
{
class Taco2PrenetKernel
{
public:
/**
* @brief Create a new Taco2PrenetKernel.
*
* @param fc1WeightsHost The weights of the first fully connected layer.
* @param fc2WeightsHost The weights of the second fully connected layer.
* @param inputLength The length of the input.
* @param numDimension The number of dimensions of the FC layers.
*/
Taco2PrenetKernel(const std::vector<float>& fc1WeightsHost, const std::vector<float>& fc2WeightsHost,
int inputLength, int numDimension);
/**
* @brief Execute this kernel.
*
* @param inputDevice The input on the device.
* @param dropoutDevice The dropout input on the device.
* @param outputDevice The output on the device.
* @param scratchDevice The scratch space on the device.
* @param stream The stream to operate on.
*/
void execute(const float* inputDevice, const float* dropoutDevice, float* outputDevice, float* scratchDevice,
cudaStream_t stream);
private:
int mInputLength;
int mNumDimension;
tts::CudaMemory<float> mWeights1Device;
tts::CudaMemory<float> mWeights2Device;
};
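/* Usage sketch (illustrative, not part of the original header). Weight vectors
 * are passed host-side to the constructor; the buffer sizes shown are
 * assumptions based on the two fully connected layers:
 *   std::vector<float> fc1(numDimension * inputLength);
 *   std::vector<float> fc2(numDimension * numDimension);
 *   Taco2PrenetKernel prenet(fc1, fc2, inputLength, numDimension);
 *   prenet.execute(inputDev, dropoutDev, outputDev, scratchDev, stream);
 * Device buffers and the CUDA stream are assumed to be managed by the caller.
 */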
} // namespace plugin
} // namespace nvinfer1
#endif
|
Tools/PyTorch/TimeSeriesPredictionPlatform/evaluators | evaluators | triton_evaluator | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
import time
import logging
from tqdm import tqdm
from .evaluation_metrics import METRICS
from .evaluator import MetricEvaluator
from triton.run_inference_on_triton import AsyncGRPCTritonRunner
import tritonclient.http as triton_http
import tritonclient.grpc as triton_grpc
import xgboost as xgb
import hydra
class TritonEvaluator(MetricEvaluator):
def __init__(self, config):
self.output_selector = config.get("output_selector", None)
self.metrics = []
preprocessor_state = pickle.load(open(config.preprocessor_state_path, "rb"))
self.scalers = preprocessor_state["scalers"]
self.save_predictions = config.get("save_predictions", False)
self.example_history = []
for name in config.metrics:
if name not in METRICS:
raise ValueError(f"No metric of name: {name}")
self.metrics.append(METRICS[name]())
self.config = config
def predict(self, dataloader, model_name, server_url="localhost:8001"):
LOGGER = logging.getLogger("run_inference_on_triton")
runner = AsyncGRPCTritonRunner(
server_url,
model_name,
"1",
dataloader=dataloader(),
verbose=False,
resp_wait_s=120,
max_unresponded_reqs=128,
)
start = time.time()
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
if self.save_predictions:
self.example_history.append(x['target__6'][:,:self.config.encoder_length])
ids_full.append(ids)
preds_full.append(y_pred['target__0'])
labels_full.append(y_real['target__0'][:,:,0][:,:,np.newaxis])
weights_full.append(x['weight__9'])
stop = time.time()
preds_full = np.concatenate(preds_full, axis=0)
labels_full = np.concatenate(labels_full, axis=0)
weights_full = np.concatenate(weights_full, axis=0)
if np.isnan(weights_full).any():
weights_full = np.empty([0])
ids_full = np.concatenate(ids_full, axis=0)
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
if self.save_predictions:
self.example_history = np.concatenate(self.example_history, axis=0)
return preds_full, labels_full, ids_full, weights_full
def predict_xgboost(self, dataloader, max_batch_size, server_url="localhost:8001"):
grpc_client = triton_grpc.InferenceServerClient(
url=server_url,
verbose = False
)
out = []
labels = []
ids = []
weights = []
for i, (test_step, test_label) in enumerate(dataloader):
labels.append(test_label.to_numpy())
ids.append(test_step['_id_'].to_numpy())
data = test_step.to_numpy().astype('float32')
test_len = len(data)
num_iters = int(test_len/max_batch_size) + 1
temp_out = []
for j in range(num_iters):
sliced_data = data[j*max_batch_size:(j+1)*max_batch_size]
dims = sliced_data.shape
triton_input_grpc = triton_grpc.InferInput(
'input__0',
dims,
'FP32'
)
triton_input_grpc.set_data_from_numpy(sliced_data)
triton_output_grpc = triton_grpc.InferRequestedOutput('output__0')
request_grpc = grpc_client.infer(
f'xgb_{i+1}',
model_version='1',
inputs=[triton_input_grpc],
outputs=[triton_output_grpc]
)
outt = request_grpc.as_numpy('output__0')
temp_out = np.hstack((temp_out, outt))
out.append(temp_out)
weights.append([])
outtemp = np.vstack(out).transpose()
labels_temp = np.hstack(labels)
ids_temp = np.vstack(ids).transpose()
if len(outtemp.shape) == 2:
outtemp = outtemp[:,:,np.newaxis]
if len(labels_temp.shape) == 2:
labels_temp = labels_temp[:, :, np.newaxis]
if self.save_predictions:
labels_ids = dataloader.data[['_id_', dataloader.target[0]]]
for n, g in labels_ids.groupby("_id_"):
labels_all = g[dataloader.target[0]].to_numpy().round(6)
windows_labels = np.lib.stride_tricks.sliding_window_view(labels_all, dataloader.example_length)
self.example_history.append(windows_labels.copy()[:, :dataloader.encoder_length])
self.example_history = np.concatenate(self.example_history, axis=0)[:, :, np.newaxis]
return outtemp, labels_temp, ids_temp[:,0], np.stack(weights)
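# Usage sketch (illustrative, not part of the original module). The model name
# and dataloader factory below are assumptions; predict() talks to a running
# Triton server over gRPC:
#   evaluator = TritonEvaluator(config)
#   preds, labels, ids, weights = evaluator.predict(dataloader_fn, "tft_traffic")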
|
TensorFlow2/LanguageModeling/BERT/official/modeling/activations | activations | swish | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
"""Computes the Swish activation function.
The tf.nn.swish operation uses a custom gradient to reduce memory usage.
Since saving custom gradients in SavedModel is currently not supported, and
one would not be able to use an exported TF-Hub module for fine-tuning, we
provide this wrapper that allows selecting whether to use the native
TensorFlow swish operation, or a customized operation that uses the default
TensorFlow gradient computation.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.sigmoid(features)
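# Illustrative check (not part of the original module): simple_swish matches the
# native op numerically, e.g.
#   x = tf.constant([-1.0, 0.0, 1.0])
#   tf.debugging.assert_near(simple_swish(x), tf.nn.swish(x))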
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
"""Computes a hard version of the swish function.
This operation can be used to reduce computational cost and improve
quantization for edge devices.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
"""Computes the identity function.
Useful for helping in quantization.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return tf.identity(features)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class Accelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
CUDA = NONE # backward compatibility
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
FASTERTRANSFORMER = "fastertransformer"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
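# Example (illustrative, not part of the original module): fetch the
# user-provided dataloader factory from a script; the path is an assumption.
#   get_dataloader_fn = load_from_file(
#       "dataloader.py", label="dataloader", target=DATALOADER_FN_NAME)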
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and processes a model from file based on the given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class BatchingMode(Enum):
"""
Available batching modes
"""
STATIC = "static"
DYNAMIC = "dynamic"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
SYSTEM = "system"
CUDA = "cuda"
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_predictor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box predictor for object detectors.
Box predictors are classes that take a high level
image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding classes for each box.
These components are passed directly to loss functions
in our detection models.
These modules are separated from the main model since the same
few box predictor architectures are shared across many models.
"""
from abc import abstractmethod
import tensorflow as tf
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
MASK_PREDICTIONS = 'mask_predictions'
class BoxPredictor(object):
"""BoxPredictor."""
def __init__(self, is_training, num_classes):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
"""
self._is_training = is_training
self._num_classes = num_classes
@property
def is_keras_model(self):
return False
@property
def num_classes(self):
return self._num_classes
def predict(self, image_features, num_predictions_per_location,
scope=None, **params):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
lists correspond to the feature maps in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
scope: Variable and Op scope name.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
Raises:
ValueError: If length of `image_features` is not equal to length of
`num_predictions_per_location`.
"""
if len(image_features) != len(num_predictions_per_location):
raise ValueError('image_feature and num_predictions_per_location must '
'be of same length, found: {} vs {}'.
format(len(image_features),
len(num_predictions_per_location)))
if scope is not None:
with tf.variable_scope(scope):
return self._predict(image_features, num_predictions_per_location,
**params)
return self._predict(image_features, num_predictions_per_location,
**params)
# TODO(rathodv): num_predictions_per_location could be moved to constructor.
# This is currently only used by ConvolutionalBoxPredictor.
@abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location: A list of integers representing the number
of box predictions to be made per spatial location for each feature map.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
pass
class KerasBoxPredictor(tf.keras.Model):
"""Keras-based BoxPredictor."""
def __init__(self, is_training, num_classes, freeze_batchnorm,
inplace_batchnorm_update, name=None):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
name: A string name scope to assign to the model. If `None`, Keras
will auto-generate one from the class name.
"""
super(KerasBoxPredictor, self).__init__(name=name)
self._is_training = is_training
self._num_classes = num_classes
self._freeze_batchnorm = freeze_batchnorm
self._inplace_batchnorm_update = inplace_batchnorm_update
@property
def is_keras_model(self):
return True
@property
def num_classes(self):
return self._num_classes
def call(self, image_features, **kwargs):
"""Computes encoded object locations and corresponding confidences.
Takes a list of high level image feature maps as input and produces a list
of box encodings and a list of class scores where each element in the output
lists correspond to the feature maps in the input list.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
return self._predict(image_features, **kwargs)
@abstractmethod
def _predict(self, image_features, **kwargs):
"""Implementations must override this method.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
**kwargs: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A list of float tensors. Each entry in the list
corresponds to a feature map in the input `image_features` list. All
tensors in the list have one of the two following shapes:
a. [batch_size, num_anchors_i, q, code_size] representing the location
of the objects, where q is 1 or the number of classes.
b. [batch_size, num_anchors_i, code_size].
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
raise NotImplementedError
|
Tools/PyTorch/TimeSeriesPredictionPlatform | TimeSeriesPredictionPlatform | launch_inference | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import hydra
warnings.filterwarnings("ignore")
from inference.inference import run_inference
@hydra.main(config_path="conf/", config_name="inference_config")
def main(cfg):
print(cfg)
cfg.inference.config.checkpoint=cfg.checkpoint
hydra.utils.call(cfg, _recursive_=False)
if __name__ == "__main__":
main()
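# Example invocation (illustrative; the exact overrides depend on
# conf/inference_config.yaml):
#   python launch_inference.py checkpoint=/path/to/best_checkpoint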
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | faster_rcnn_mobilenet_v1_feature_extractor_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for faster_rcnn_mobilenet_v1_feature_extractor."""
import numpy as np
import tensorflow as tf
from object_detection.models import faster_rcnn_mobilenet_v1_feature_extractor as faster_rcnn_mobilenet_v1
class FasterRcnnMobilenetV1FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return faster_rcnn_mobilenet_v1.FasterRCNNMobilenetV1FeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 512])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 512])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 512])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_on_very_small_images(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
features_shape,
feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)})
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[3, 14, 14, 576], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [3, 7, 7, 1024])
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling | modeling | matcher | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import torch
from maskrcnn_benchmark import _C
class Matcher(object):
"""
This class assigns to each predicted "element" (e.g., a box) a ground-truth
element. Each predicted element will have exactly zero or one matches; each
ground-truth element may be assigned to zero or more predicted elements.
Matching is based on the MxN match_quality_matrix, that characterizes how well
each (ground-truth, predicted)-pair match. For example, if the elements are
boxes, the matrix may contain box IoU overlap values.
The matcher returns a tensor of size N containing the index of the ground-truth
element m that matches to prediction n. If there is no match, a negative value
is returned.
"""
BELOW_LOW_THRESHOLD = -1
BETWEEN_THRESHOLDS = -2
def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
"""
Args:
high_threshold (float): quality values greater than or equal to
this value are candidate matches.
low_threshold (float): a lower quality threshold used to stratify
matches into three levels:
1) matches >= high_threshold
2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
allow_low_quality_matches (bool): if True, produce additional matches
for predictions that have only low-quality match candidates. See
set_low_quality_matches_ for more details.
"""
assert low_threshold <= high_threshold
self.high_threshold = high_threshold
self.low_threshold = low_threshold
self.allow_low_quality_matches = allow_low_quality_matches
def __call__(self, match_quality_matrix):
"""
Args:
match_quality_matrix (Tensor[float]): an MxN tensor, containing the
pairwise quality between M ground-truth elements and N predicted elements.
Returns:
matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
[0, M - 1] or a negative value indicating that prediction i could not
be matched.
"""
if match_quality_matrix.numel() == 0:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
raise ValueError(
"No ground-truth boxes available for one of the images "
"during training")
else:
raise ValueError(
"No proposal boxes available for one of the images "
"during training")
# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
if match_quality_matrix.is_cuda:
matches = _C.match_proposals(match_quality_matrix,self.allow_low_quality_matches, self.low_threshold, self.high_threshold)
else:
matched_vals, matches = match_quality_matrix.max(dim=0)
if self.allow_low_quality_matches:
all_matches = matches.clone()
# Assign candidate matches with low quality to negative (unassigned) values
below_low_threshold = matched_vals < self.low_threshold
between_thresholds = (matched_vals >= self.low_threshold) & (
matched_vals < self.high_threshold
)
matches[below_low_threshold] = Matcher.BELOW_LOW_THRESHOLD
matches[between_thresholds] = Matcher.BETWEEN_THRESHOLDS
if self.allow_low_quality_matches:
self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
return matches
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
"""
Produce additional matches for predictions that have only low-quality matches.
Specifically, for each ground-truth find the set of predictions that have
maximum overlap with it (including ties); for each prediction in that set, if
it is unmatched, then match it to the ground-truth with which it has the highest
quality value.
"""
# For each gt, find the prediction with which it has highest quality
highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
# Find highest quality match available, even if it is low, including ties
gt_pred_pairs_of_highest_quality = torch.nonzero(
match_quality_matrix == highest_quality_foreach_gt[:, None]
)
# Example gt_pred_pairs_of_highest_quality:
# tensor([[ 0, 39796],
# [ 1, 32055],
# [ 1, 32070],
# [ 2, 39190],
# [ 2, 40255],
# [ 3, 40390],
# [ 3, 41455],
# [ 4, 45470],
# [ 5, 45325],
# [ 5, 46390]])
# Each row is a (gt index, prediction index)
# Note how gt items 1, 2, 3, and 5 each have two ties
pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
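# Usage sketch (illustrative, not part of the original module): on CPU, a 2x3
# IoU matrix with thresholds 0.7/0.3 is resolved as follows.
#   matcher = Matcher(high_threshold=0.7, low_threshold=0.3,
#                     allow_low_quality_matches=True)
#   iou = torch.tensor([[0.9, 0.2, 0.4],
#                       [0.1, 0.8, 0.2]])
#   matcher(iou)  # -> tensor([0, 1, -2]); prediction 2 falls between thresholds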
|
PyTorch/SpeechSynthesis/HiFiGAN/scripts | scripts | fine_tune | #!/usr/bin/env bash
set -a
: ${FINE_TUNE_DIR:="data/mels-fastpitch-ljs22khz"}
: ${FINE_TUNE_LR_FACTOR:=3}
: ${EPOCHS:=10000} # 6500 + 3500
if [ ! -d "$FINE_TUNE_DIR" ]; then
echo "Finetuning spectrograms missing at $FINE_TUNE_DIR ."
echo "Those need to be generated with scripts/extract_fine_tune_mels.sh"
echo "Consult the README.md for details."
exit 1
fi
bash scripts/train_lj22khz.sh "$@"
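# Example (illustrative): point the script at a different mel directory; any
# extra flags are forwarded to scripts/train_lj22khz.sh:
#   FINE_TUNE_DIR=data/my-mels bash scripts/fine_tune.sh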
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_inference_runner | triton_inference_runner | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonInferenceRunner # noqa: F401
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | image_resizer_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for image resizing operations."""
import functools
import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.protos import image_resizer_pb2
def _tf_resize_method(resize_method):
"""Maps image resize method from enumeration type to TensorFlow.
Args:
resize_method: The resize_method attribute of keep_aspect_ratio_resizer or
fixed_shape_resizer.
Returns:
method: The corresponding TensorFlow ResizeMethod.
Raises:
ValueError: if `resize_method` is of unknown type.
"""
dict_method = {
image_resizer_pb2.BILINEAR:
tf.image.ResizeMethod.BILINEAR,
image_resizer_pb2.NEAREST_NEIGHBOR:
tf.image.ResizeMethod.NEAREST_NEIGHBOR,
image_resizer_pb2.BICUBIC:
tf.image.ResizeMethod.BICUBIC,
image_resizer_pb2.AREA:
tf.image.ResizeMethod.AREA
}
if resize_method in dict_method:
return dict_method[resize_method]
else:
raise ValueError('Unknown resize_method')
def build(image_resizer_config):
"""Builds callable for image resizing operations.
Args:
image_resizer_config: image_resizer.proto object containing parameters for
an image resizing operation.
Returns:
image_resizer_fn: Callable for image resizing. This callable always takes
a rank-3 image tensor (corresponding to a single image) and returns a
rank-3 image tensor, possibly with new spatial dimensions.
Raises:
ValueError: if `image_resizer_config` is of incorrect type.
ValueError: if `image_resizer_config.image_resizer_oneof` is of an unexpected
type.
ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer
is used.
"""
if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer):
raise ValueError('image_resizer_config not of type '
'image_resizer_pb2.ImageResizer.')
image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof')
if image_resizer_oneof == 'keep_aspect_ratio_resizer':
keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer
if not (keep_aspect_ratio_config.min_dimension <=
keep_aspect_ratio_config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = _tf_resize_method(keep_aspect_ratio_config.resize_method)
per_channel_pad_value = (0, 0, 0)
if keep_aspect_ratio_config.per_channel_pad_value:
per_channel_pad_value = tuple(keep_aspect_ratio_config.
per_channel_pad_value)
image_resizer_fn = functools.partial(
preprocessor.resize_to_range,
min_dimension=keep_aspect_ratio_config.min_dimension,
max_dimension=keep_aspect_ratio_config.max_dimension,
method=method,
pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension,
per_channel_pad_value=per_channel_pad_value)
if not keep_aspect_ratio_config.convert_to_grayscale:
return image_resizer_fn
elif image_resizer_oneof == 'fixed_shape_resizer':
fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer
method = _tf_resize_method(fixed_shape_resizer_config.resize_method)
image_resizer_fn = functools.partial(
preprocessor.resize_image,
new_height=fixed_shape_resizer_config.height,
new_width=fixed_shape_resizer_config.width,
method=method)
if not fixed_shape_resizer_config.convert_to_grayscale:
return image_resizer_fn
else:
raise ValueError(
'Invalid image resizer option: \'%s\'.' % image_resizer_oneof)
def grayscale_image_resizer(image, masks=None):
"""Convert to grayscale before applying image_resizer_fn.
Args:
image: A 3D tensor of shape [height, width, 3]
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, 1],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
"""
# image_resizer_fn returns [resized_image, resized_image_shape] if
# mask==None, otherwise it returns
# [resized_image, resized_mask, resized_image_shape]. In either case, we
# only deal with first and last element of the returned list.
retval = image_resizer_fn(image, masks)
resized_image = retval[0]
resized_image_shape = retval[-1]
retval[0] = preprocessor.rgb_to_gray(resized_image)
retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0)
return retval
return functools.partial(grayscale_image_resizer)
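# Example (illustrative, not part of the original module): build a fixed-shape
# resizer from a proto config; the 300x300 size is an assumption.
#   config = image_resizer_pb2.ImageResizer()
#   config.fixed_shape_resizer.height = 300
#   config.fixed_shape_resizer.width = 300
#   resize_fn = build(config)
#   resized_image, resized_shape = resize_fn(image)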
|
PyTorch/Classification/GPUNet/triton/05ms-D/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | batching: dynamic
checkpoints:
- name: 0.5ms-D
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p0_pyt_ckpt/versions/21.12.0_amp/zip
configurations:
- checkpoint: 0.5ms-D
parameters:
backend_accelerator: trt
checkpoint: 0.5ms-D
device_kind: gpu
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 64
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 0
torch_jit: none
container_version: '21.12'
datasets:
- name: imagenet
datasets_dir: datasets
ensemble_model_name: null
framework: PyTorch
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: GPUnet
performance_tool: model_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/Detection/Efficientdet/effdet/csrc/focal_loss | focal_loss | focal_loss_cuda | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <torch/torch.h>
#include <vector>
// CUDA forward declarations
std::vector<at::Tensor> focal_loss_forward_cuda(
const at::Tensor &cls_output, const at::Tensor &cls_targets_at_level,
const at::Tensor &num_positives_sum, const int64_t num_real_classes,
const float alpha, const float gamma, const float smoothing_factor);
at::Tensor focal_loss_backward_cuda(const at::Tensor &grad_output,
const at::Tensor &partial_grad,
const at::Tensor &num_positives_sum);
// C++ interface
#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
std::vector<at::Tensor> focal_loss_forward(
const at::Tensor &cls_output, const at::Tensor &cls_targets_at_level,
const at::Tensor &num_positives_sum, const int64_t num_real_classes,
const float alpha, const float gamma, const float smoothing_factor) {
CHECK_INPUT(cls_output);
CHECK_INPUT(cls_targets_at_level);
CHECK_INPUT(num_positives_sum);
return focal_loss_forward_cuda(cls_output, cls_targets_at_level,
num_positives_sum, num_real_classes, alpha,
gamma, smoothing_factor);
}
at::Tensor focal_loss_backward(const at::Tensor &grad_output,
const at::Tensor &partial_grad,
const at::Tensor &num_positives_sum) {
CHECK_INPUT(grad_output);
CHECK_INPUT(partial_grad);
return focal_loss_backward_cuda(grad_output, partial_grad, num_positives_sum);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &focal_loss_forward,
"Focal loss calculation forward (CUDA)");
m.def("backward", &focal_loss_backward,
"Focal loss calculation backward (CUDA)");
}
|
PyTorch/Segmentation/nnUNet/nnunet | nnunet | metrics | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchmetrics import Metric
class Dice(Metric):
full_state_update = False
def __init__(self, n_class, brats):
super().__init__(dist_sync_on_step=False)
self.n_class = n_class
self.brats = brats
self.add_state("steps", default=torch.zeros(1), dist_reduce_fx="sum")
self.add_state("dice", default=torch.zeros((n_class,)), dist_reduce_fx="sum")
self.add_state("loss", default=torch.zeros(1), dist_reduce_fx="sum")
def update(self, p, y, l):
self.steps += 1
self.dice += self.compute_stats_brats(p, y) if self.brats else self.compute_stats(p, y)
self.loss += l
def compute(self):
return 100 * self.dice / self.steps, self.loss / self.steps
def compute_stats_brats(self, p, y):
scores = torch.zeros(self.n_class, device=p.device, dtype=torch.float32)
p = (torch.sigmoid(p) > 0.5).int()
y_wt, y_tc, y_et = y > 0, ((y == 1) + (y == 3)) > 0, y == 3
y = torch.stack([y_wt, y_tc, y_et], dim=1)
for i in range(self.n_class):
p_i, y_i = p[:, i], y[:, i]
if (y_i != 1).all():
# no foreground class
scores[i] += 1 if (p_i != 1).all() else 0
continue
tp, fn, fp = self.get_stats(p_i, y_i, 1)
denom = (2 * tp + fp + fn).to(torch.float)
score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else 0.0
scores[i] += score_cls
return scores
def compute_stats(self, p, y):
scores = torch.zeros(self.n_class, device=p.device, dtype=torch.float32)
p = torch.argmax(p, dim=1)
for i in range(1, self.n_class + 1):
if (y != i).all():
# no foreground class
scores[i - 1] += 1 if (p != i).all() else 0
continue
tp, fn, fp = self.get_stats(p, y, i)
denom = (2 * tp + fp + fn).to(torch.float)
score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else 0.0
scores[i - 1] += score_cls
return scores
@staticmethod
def get_stats(p, y, c):
tp = torch.logical_and(p == c, y == c).sum()
fn = torch.logical_and(p != c, y == c).sum()
fp = torch.logical_and(p == c, y != c).sum()
return tp, fn, fp
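# Usage sketch (illustrative, not part of the original module): accumulate Dice
# over a validation epoch; `logits`, `target` and `loss` are assumptions.
#   dice = Dice(n_class=3, brats=False)
#   for logits, target, loss in val_batches:
#       dice.update(logits, target, loss)
#   mean_dice_per_class, mean_loss = dice.compute()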
|
TensorFlow2/Recommendation/SIM/sim/utils | utils | benchmark | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import perf_counter
import horovod.tensorflow as hvd
import numpy as np
import tensorflow as tf
from horovod.tensorflow.mpi_ops import Sum
class PerformanceCalculator:
"""
PerformanceCalculator for throughput and latency statistics.
Computes the statistics over a given number of steps. Timers should be initialized by the user by
calling init() at the right moment -- just before running consecutive iterations of training.
Attributes:
warmup_steps (int): Number of initial steps to ignore for computing results.
total_steps (int): Number of steps to collect data for (excluding warmup_steps); use <= 0 for unbounded horizon.
"""
def __init__(self, warmup_steps=0, total_steps=0):
self.warmup_steps = max(warmup_steps, 0)
self.total_steps = self.warmup_steps + max(total_steps, 0)
self.step = 0
self.step_start_time = None
self.benchmark_start_time = None
self.benchmark_after_warmup_start_time = None
self.step_latencies = []
self.latency_percentiles = (90, 95, 99)
self._results = {}
with tf.device("/CPU:0"):
self.samples = tf.Variable(0, trainable=False, dtype=tf.int64)
def init(self):
self.samples.assign(0)
self.step_latencies = []
self._results = {}
# used to represent duration of entire training
self.benchmark_start_time = perf_counter()
# used to represent a time interval from post-warmup until the end
self.benchmark_after_warmup_start_time = perf_counter()
self.step_start_time = perf_counter()
@property
def results(self):
return self._results.copy()
@property
def completed(self):
return bool(self._results)
def get_current_benchmark_results(self):
if self.benchmark_start_time is None:
raise RuntimeError(f"{self.__class__.__name__} has not been initialized")
if self.step <= self.warmup_steps:
raise RuntimeError(f"{self.__class__.__name__} is in warmup phase")
results = self._calculate_throughput()
results.update(self._calculate_latency())
return results
def _calculate_latency(self):
latency_stats = {"latency_mean": 1000 * np.mean(self.step_latencies)} # in milliseconds
for p in self.latency_percentiles:
latency_stats[f"latency_p{p}"] = 1000 * np.percentile(self.step_latencies, p)
return latency_stats
def _calculate_throughput(self):
time_elapsed = perf_counter() - self.benchmark_start_time
time_elapsed_after_warmup = perf_counter() - self.benchmark_after_warmup_start_time
all_samples = hvd.allreduce(self.samples, op=Sum)
benchmark_throughput = all_samples.numpy() / time_elapsed_after_warmup
return {"throughput": benchmark_throughput, "time": time_elapsed}
def __call__(self, n_samples):
self.samples.assign_add(n_samples)
step_latency = perf_counter() - self.step_start_time
step_throughput = n_samples * hvd.size() / step_latency
self.step_latencies.append(step_latency)
self.step += 1
if self.step == self.warmup_steps:
self.samples.assign(0)
self.step_latencies = []
self.benchmark_after_warmup_start_time = perf_counter()
elif self.step == self.total_steps:
self._results = self.get_current_benchmark_results()
self.step_start_time = perf_counter()
return step_throughput
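# Usage sketch (illustrative, not part of the original module): wrap a training
# loop to collect throughput/latency statistics after a warmup period.
#   calc = PerformanceCalculator(warmup_steps=100, total_steps=1000)
#   calc.init()
#   for batch in dataset:
#       train_step(batch)          # assumed user-defined training step
#       calc(n_samples=batch_size)
#       if calc.completed:
#           print(calc.results)
#           break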
|
PyTorch/Segmentation/nnUNet/scripts | scripts | inference | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from os.path import dirname
from subprocess import run
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--data", type=str, required=True, help="Path to data")
parser.add_argument("--task", type=str, default="01", help="Path to data")
parser.add_argument("--fold", type=int, required=True, choices=[0, 1, 2, 3, 4], help="Fold number")
parser.add_argument("--dim", type=int, required=True, help="Dimension of UNet")
parser.add_argument("--ckpt_path", type=str, required=True, help="Path to checkpoint")
parser.add_argument("--batch_size", type=int, default=4, help="Batch size")
parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision")
parser.add_argument("--tta", action="store_true", help="Enable test time augmentation")
parser.add_argument("--save_preds", action="store_true", help="Save predicted masks")
if __name__ == "__main__":
args = parser.parse_args()
path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), "main.py")
cmd = f"python {path_to_main} --exec_mode predict --task {args.task} --gpus 1 "
cmd += f"--data {args.data} "
cmd += f"--dim {args.dim} "
cmd += f"--fold {args.fold} "
cmd += f"--ckpt_path {args.ckpt_path} "
cmd += f"--val_batch_size {args.batch_size} "
cmd += "--amp " if args.amp else ""
cmd += "--tta " if args.tta else ""
cmd += "--save_preds " if args.save_preds else ""
run(cmd, shell=True)
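# Example invocation (paths are illustrative):
#   python scripts/inference.py --data /data/01_3d --dim 3 --fold 0 \
#       --ckpt_path /results/checkpoints/best.ckpt --amp --tta --save_preds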
|
PyTorch/Detection/SSD/examples | examples | SSD300_A100_FP16_8GPU | # This script launches SSD300 training in FP16 on 8 GPUs using 1024 batch size (128 per GPU)
# Usage ./SSD300_A100_FP16_8GPU.sh <path to this repository> <path to dataset> <additional flags>
torchrun --nproc_per_node=8 $1/main.py --backbone resnet50 --learning-rate 2.7e-3 --warmup 1200 --bs 128 --data $2 ${@:3}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/layers | layers | _utils | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load as load_ext
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError("The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
this_dir = os.path.dirname(this_dir)
this_dir = os.path.join(this_dir, "csrc")
main_file = glob.glob(os.path.join(this_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(this_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(this_dir, "cuda", "*.cu"))
source = main_file + source_cpu
extra_cflags = []
if torch.cuda.is_available() and CUDA_HOME is not None:
source.extend(source_cuda)
extra_cflags = ["-DWITH_CUDA"]
source = [os.path.join(this_dir, s) for s in source]
extra_include_paths = [this_dir]
return load_ext(
"torchvision",
source,
extra_cflags=extra_cflags,
extra_include_paths=extra_include_paths,
)
_C = _load_C_extensions()
|
TensorFlow2/LanguageModeling/BERT/official/utils/flags | flags | _conventions | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Central location for shared argparse convention definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import codecs
import functools
from absl import app as absl_app
from absl import flags
# This codifies help string conventions and makes it easy to update them if
# necessary. Currently the only major effect is that help bodies start on the
# line after flags are listed. All flag definitions should wrap the text bodies
# with help wrap when calling DEFINE_*.
_help_wrap = functools.partial(flags.text_wrap, length=80, indent="",
firstline_indent="\n")
# Pretty formatting causes issues when utf-8 is not installed on a system.
def _stdout_utf8():
try:
codecs.lookup("utf-8")
except LookupError:
return False
return sys.stdout.encoding == "UTF-8"
if _stdout_utf8():
help_wrap = _help_wrap
else:
def help_wrap(text, *args, **kwargs):
return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"")
# Replace None with h to also allow -h
absl_app.HelpshortFlag.SHORT_NAME = "h"
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks | notebooks | README | # Tacotron2 and WaveGlow
A Jupyter notebook based on the Quick Start Guide of: https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2
## Requirements
Ensure you have the following components:
* NVIDIA Docker (https://github.com/NVIDIA/nvidia-docker)
* PyTorch 19.06-py3+ NGC container or newer (https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)
* NVIDIA Volta (https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or Turing (https://www.nvidia.com/en-us/geforce/turing/) based GPU
Before running the Jupyter notebook, make sure you have already cloned the code from GitHub:
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2
```
Copy the Tacotron2.ipynb file into the folder 'Tacotron2'
```bash
cp notebooks/Tacotron2.ipynb .
```
### Running the quick start guide as a Jupyter notebook
To run the notebook on your local machine:
```bash
jupyter notebook Tacotron2.ipynb
```
To run the notebook remotely:
```bash
jupyter notebook --ip=0.0.0.0 --allow-root
```
And navigate a web browser to the IP address or hostname of the host machine at port `8888`:
```
http://[host machine]:8888
```
Use the token listed in the output from running the `jupyter` command to log in, for example:
```
http://[host machine]:8888/?token=aae96ae9387cd28151868fee318c3b3581a2d794f3b25c6b
``` |
PyTorch/Recommendation/DLRM/dlrm/nn | nn | factories | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence
from dlrm.nn.embeddings import (
JointEmbedding, MultiTableEmbeddings, FusedJointEmbedding, JointSparseEmbedding,
Embeddings
)
from dlrm.nn.interactions import Interaction, CudaDotInteraction, DotInteraction, CatInteraction
from dlrm.nn.mlps import AbstractMlp, CppMlp, TorchMlp
from dlrm.utils.distributed import is_distributed
def create_mlp(input_dim: int, sizes: Sequence[int], use_cpp_mlp: bool) -> AbstractMlp:
return CppMlp(input_dim, sizes) if use_cpp_mlp else TorchMlp(input_dim, sizes)
def create_embeddings(
embedding_type: str,
categorical_feature_sizes: Sequence[int],
embedding_dim: int,
device: str = "cuda",
hash_indices: bool = False,
fp16: bool = False
) -> Embeddings:
if embedding_type == "joint":
return JointEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices)
elif embedding_type == "joint_fused":
assert not is_distributed(), "Joint fused embedding is not supported in the distributed mode. " \
"You may want to use 'joint_sparse' option instead."
return FusedJointEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices,
amp_train=fp16)
elif embedding_type == "joint_sparse":
return JointSparseEmbedding(categorical_feature_sizes, embedding_dim, device=device, hash_indices=hash_indices)
elif embedding_type == "multi_table":
return MultiTableEmbeddings(categorical_feature_sizes, embedding_dim,
hash_indices=hash_indices, device=device)
else:
raise NotImplementedError(f"unknown embedding type: {embedding_type}")
def create_interaction(interaction_op: str, embedding_num: int, embedding_dim: int) -> Interaction:
if interaction_op == "dot":
return DotInteraction(embedding_num, embedding_dim)
elif interaction_op == "cuda_dot":
return CudaDotInteraction(
DotInteraction(embedding_num, embedding_dim)
)
elif interaction_op == "cat":
return CatInteraction(embedding_num, embedding_dim)
else:
raise NotImplementedError(f"unknown interaction op: {interaction_op}")
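# The guarded block below is a hypothetical usage sketch (not part of the
# original file): it shows how the three factories might be combined to build
# the dense MLP, the embedding tables and the interaction layer of a
# DLRM-style model. All sizes are made-up illustration values.
if __name__ == "__main__":
    bottom_mlp = create_mlp(input_dim=13, sizes=[512, 256, 128], use_cpp_mlp=False)
    embeddings = create_embeddings(
        embedding_type="joint",
        categorical_feature_sizes=[1000, 500, 250],
        embedding_dim=128,
        device="cuda",
    )
    interaction = create_interaction("dot", embedding_num=3, embedding_dim=128)
    print(type(bottom_mlp).__name__, type(embeddings).__name__, type(interaction).__name__)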
|
PyTorch/SpeechSynthesis/Tacotron2/phrases | phrases | phrase_1_128 | The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the
|
PyTorch/SpeechRecognition/QuartzNet/configs | configs | quartznet15x5_speedp-online-1.15_speca | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "QuartzNet"
labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
input_val:
audio_dataset: &val_dataset
sample_rate: &sample_rate 16000
trim_silence: true
normalize_transcripts: true
filterbank_features: &val_features
normalize: per_feature
sample_rate: *sample_rate
window_size: 0.02
window_stride: 0.01
window: hann
n_filt: &n_filt 64
n_fft: 512
frame_splicing: &frame_splicing 1
dither: 0.00001
pad_align: 16
# For training we keep samples < 16.7s and apply augmentation
input_train:
audio_dataset:
<<: *val_dataset
max_duration: 16.7
ignore_offline_speed_perturbation: true
speed_perturbation:
min_rate: 0.85
max_rate: 1.15
filterbank_features:
<<: *val_features
max_duration: 16.7
spec_augment:
freq_masks: 2
max_freq: 15
time_masks: 2
max_time: 55
quartznet:
encoder:
init: xavier_uniform
in_feats: *n_filt
frame_splicing: *frame_splicing
activation: relu
use_conv_masks: true
blocks:
- &Conv1
filters: 256
repeat: 1
kernel_size: [33]
dilation: [1]
stride: [2]
dropout: 0.0
residual: false
separable: true
- &B1
filters: 256
repeat: 5
kernel_size: [33]
dilation: [1]
stride: [1]
dropout: 0.0
residual: true
separable: true
- *B1
- *B1
- &B2
filters: 256
repeat: 5
kernel_size: [39]
dilation: [1]
stride: [1]
dropout: 0.0
residual: true
separable: true
- *B2
- *B2
- &B3
filters: 512
repeat: 5
kernel_size: [51]
dilation: [1]
stride: [1]
dropout: 0.0
residual: true
separable: true
- *B3
- *B3
- &B4
filters: 512
repeat: 5
kernel_size: [63]
dilation: [1]
stride: [1]
dropout: 0.0
residual: true
separable: true
- *B4
- *B4
- &B5
filters: 512
repeat: 5
kernel_size: [75]
dilation: [1]
stride: [1]
dropout: 0.0
residual: true
separable: true
- *B5
- *B5
- &Conv2
filters: 512
repeat: 1
kernel_size: [87]
dilation: [2]
stride: [1]
dropout: 0.0
residual: false
separable: true
- &Conv3
filters: &enc_feats 1024
repeat: 1
kernel_size: [1]
dilation: [1]
stride: [1]
dropout: 0.0
residual: false
separable: false
decoder:
in_feats: *enc_feats
init: xavier_uniform
|
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | imagenet_lsvrc_2015_synsets | n01440764
n01443537
n01484850
n01491361
n01494475
n01496331
n01498041
n01514668
n01514859
n01518878
n01530575
n01531178
n01532829
n01534433
n01537544
n01558993
n01560419
n01580077
n01582220
n01592084
n01601694
n01608432
n01614925
n01616318
n01622779
n01629819
n01630670
n01631663
n01632458
n01632777
n01641577
n01644373
n01644900
n01664065
n01665541
n01667114
n01667778
n01669191
n01675722
n01677366
n01682714
n01685808
n01687978
n01688243
n01689811
n01692333
n01693334
n01694178
n01695060
n01697457
n01698640
n01704323
n01728572
n01728920
n01729322
n01729977
n01734418
n01735189
n01737021
n01739381
n01740131
n01742172
n01744401
n01748264
n01749939
n01751748
n01753488
n01755581
n01756291
n01768244
n01770081
n01770393
n01773157
n01773549
n01773797
n01774384
n01774750
n01775062
n01776313
n01784675
n01795545
n01796340
n01797886
n01798484
n01806143
n01806567
n01807496
n01817953
n01818515
n01819313
n01820546
n01824575
n01828970
n01829413
n01833805
n01843065
n01843383
n01847000
n01855032
n01855672
n01860187
n01871265
n01872401
n01873310
n01877812
n01882714
n01883070
n01910747
n01914609
n01917289
n01924916
n01930112
n01943899
n01944390
n01945685
n01950731
n01955084
n01968897
n01978287
n01978455
n01980166
n01981276
n01983481
n01984695
n01985128
n01986214
n01990800
n02002556
n02002724
n02006656
n02007558
n02009229
n02009912
n02011460
n02012849
n02013706
n02017213
n02018207
n02018795
n02025239
n02027492
n02028035
n02033041
n02037110
n02051845
n02056570
n02058221
n02066245
n02071294
n02074367
n02077923
n02085620
n02085782
n02085936
n02086079
n02086240
n02086646
n02086910
n02087046
n02087394
n02088094
n02088238
n02088364
n02088466
n02088632
n02089078
n02089867
n02089973
n02090379
n02090622
n02090721
n02091032
n02091134
n02091244
n02091467
n02091635
n02091831
n02092002
n02092339
n02093256
n02093428
n02093647
n02093754
n02093859
n02093991
n02094114
n02094258
n02094433
n02095314
n02095570
n02095889
n02096051
n02096177
n02096294
n02096437
n02096585
n02097047
n02097130
n02097209
n02097298
n02097474
n02097658
n02098105
n02098286
n02098413
n02099267
n02099429
n02099601
n02099712
n02099849
n02100236
n02100583
n02100735
n02100877
n02101006
n02101388
n02101556
n02102040
n02102177
n02102318
n02102480
n02102973
n02104029
n02104365
n02105056
n02105162
n02105251
n02105412
n02105505
n02105641
n02105855
n02106030
n02106166
n02106382
n02106550
n02106662
n02107142
n02107312
n02107574
n02107683
n02107908
n02108000
n02108089
n02108422
n02108551
n02108915
n02109047
n02109525
n02109961
n02110063
n02110185
n02110341
n02110627
n02110806
n02110958
n02111129
n02111277
n02111500
n02111889
n02112018
n02112137
n02112350
n02112706
n02113023
n02113186
n02113624
n02113712
n02113799
n02113978
n02114367
n02114548
n02114712
n02114855
n02115641
n02115913
n02116738
n02117135
n02119022
n02119789
n02120079
n02120505
n02123045
n02123159
n02123394
n02123597
n02124075
n02125311
n02127052
n02128385
n02128757
n02128925
n02129165
n02129604
n02130308
n02132136
n02133161
n02134084
n02134418
n02137549
n02138441
n02165105
n02165456
n02167151
n02168699
n02169497
n02172182
n02174001
n02177972
n02190166
n02206856
n02219486
n02226429
n02229544
n02231487
n02233338
n02236044
n02256656
n02259212
n02264363
n02268443
n02268853
n02276258
n02277742
n02279972
n02280649
n02281406
n02281787
n02317335
n02319095
n02321529
n02325366
n02326432
n02328150
n02342885
n02346627
n02356798
n02361337
n02363005
n02364673
n02389026
n02391049
n02395406
n02396427
n02397096
n02398521
n02403003
n02408429
n02410509
n02412080
n02415577
n02417914
n02422106
n02422699
n02423022
n02437312
n02437616
n02441942
n02442845
n02443114
n02443484
n02444819
n02445715
n02447366
n02454379
n02457408
n02480495
n02480855
n02481823
n02483362
n02483708
n02484975
n02486261
n02486410
n02487347
n02488291
n02488702
n02489166
n02490219
n02492035
n02492660
n02493509
n02493793
n02494079
n02497673
n02500267
n02504013
n02504458
n02509815
n02510455
n02514041
n02526121
n02536864
n02606052
n02607072
n02640242
n02641379
n02643566
n02655020
n02666196
n02667093
n02669723
n02672831
n02676566
n02687172
n02690373
n02692877
n02699494
n02701002
n02704792
n02708093
n02727426
n02730930
n02747177
n02749479
n02769748
n02776631
n02777292
n02782093
n02783161
n02786058
n02787622
n02788148
n02790996
n02791124
n02791270
n02793495
n02794156
n02795169
n02797295
n02799071
n02802426
n02804414
n02804610
n02807133
n02808304
n02808440
n02814533
n02814860
n02815834
n02817516
n02823428
n02823750
n02825657
n02834397
n02835271
n02837789
n02840245
n02841315
n02843684
n02859443
n02860847
n02865351
n02869837
n02870880
n02871525
n02877765
n02879718
n02883205
n02892201
n02892767
n02894605
n02895154
n02906734
n02909870
n02910353
n02916936
n02917067
n02927161
n02930766
n02939185
n02948072
n02950826
n02951358
n02951585
n02963159
n02965783
n02966193
n02966687
n02971356
n02974003
n02977058
n02978881
n02979186
n02980441
n02981792
n02988304
n02992211
n02992529
n02999410
n03000134
n03000247
n03000684
n03014705
n03016953
n03017168
n03018349
n03026506
n03028079
n03032252
n03041632
n03042490
n03045698
n03047690
n03062245
n03063599
n03063689
n03065424
n03075370
n03085013
n03089624
n03095699
n03100240
n03109150
n03110669
n03124043
n03124170
n03125729
n03126707
n03127747
n03127925
n03131574
n03133878
n03134739
n03141823
n03146219
n03160309
n03179701
n03180011
n03187595
n03188531
n03196217
n03197337
n03201208
n03207743
n03207941
n03208938
n03216828
n03218198
n03220513
n03223299
n03240683
n03249569
n03250847
n03255030
n03259280
n03271574
n03272010
n03272562
n03290653
n03291819
n03297495
n03314780
n03325584
n03337140
n03344393
n03345487
n03347037
n03355925
n03372029
n03376595
n03379051
n03384352
n03388043
n03388183
n03388549
n03393912
n03394916
n03400231
n03404251
n03417042
n03424325
n03425413
n03443371
n03444034
n03445777
n03445924
n03447447
n03447721
n03450230
n03452741
n03457902
n03459775
n03461385
n03467068
n03476684
n03476991
n03478589
n03481172
n03482405
n03483316
n03485407
n03485794
n03492542
n03494278
n03495258
n03496892
n03498962
n03527444
n03529860
n03530642
n03532672
n03534580
n03535780
n03538406
n03544143
n03584254
n03584829
n03590841
n03594734
n03594945
n03595614
n03598930
n03599486
n03602883
n03617480
n03623198
n03627232
n03630383
n03633091
n03637318
n03642806
n03649909
n03657121
n03658185
n03661043
n03662601
n03666591
n03670208
n03673027
n03676483
n03680355
n03690938
n03691459
n03692522
n03697007
n03706229
n03709823
n03710193
n03710637
n03710721
n03717622
n03720891
n03721384
n03724870
n03729826
n03733131
n03733281
n03733805
n03742115
n03743016
n03759954
n03761084
n03763968
n03764736
n03769881
n03770439
n03770679
n03773504
n03775071
n03775546
n03776460
n03777568
n03777754
n03781244
n03782006
n03785016
n03786901
n03787032
n03788195
n03788365
n03791053
n03792782
n03792972
n03793489
n03794056
n03796401
n03803284
n03804744
n03814639
n03814906
n03825788
n03832673
n03837869
n03838899
n03840681
n03841143
n03843555
n03854065
n03857828
n03866082
n03868242
n03868863
n03871628
n03873416
n03874293
n03874599
n03876231
n03877472
n03877845
n03884397
n03887697
n03888257
n03888605
n03891251
n03891332
n03895866
n03899768
n03902125
n03903868
n03908618
n03908714
n03916031
n03920288
n03924679
n03929660
n03929855
n03930313
n03930630
n03933933
n03935335
n03937543
n03938244
n03942813
n03944341
n03947888
n03950228
n03954731
n03956157
n03958227
n03961711
n03967562
n03970156
n03976467
n03976657
n03977966
n03980874
n03982430
n03983396
n03991062
n03992509
n03995372
n03998194
n04004767
n04005630
n04008634
n04009552
n04019541
n04023962
n04026417
n04033901
n04033995
n04037443
n04039381
n04040759
n04041544
n04044716
n04049303
n04065272
n04067472
n04069434
n04070727
n04074963
n04081281
n04086273
n04090263
n04099969
n04111531
n04116512
n04118538
n04118776
n04120489
n04125021
n04127249
n04131690
n04133789
n04136333
n04141076
n04141327
n04141975
n04146614
n04147183
n04149813
n04152593
n04153751
n04154565
n04162706
n04179913
n04192698
n04200800
n04201297
n04204238
n04204347
n04208210
n04209133
n04209239
n04228054
n04229816
n04235860
n04238763
n04239074
n04243546
n04251144
n04252077
n04252225
n04254120
n04254680
n04254777
n04258138
n04259630
n04263257
n04264628
n04265275
n04266014
n04270147
n04273569
n04275548
n04277352
n04285008
n04286575
n04296562
n04310018
n04311004
n04311174
n04317175
n04325704
n04326547
n04328186
n04330267
n04332243
n04335435
n04336792
n04344873
n04346328
n04347754
n04350905
n04355338
n04355933
n04356056
n04357314
n04366367
n04367480
n04370456
n04371430
n04371774
n04372370
n04376876
n04380533
n04389033
n04392985
n04398044
n04399382
n04404412
n04409515
n04417672
n04418357
n04423845
n04428191
n04429376
n04435653
n04442312
n04443257
n04447861
n04456115
n04458633
n04461696
n04462240
n04465501
n04467665
n04476259
n04479046
n04482393
n04483307
n04485082
n04486054
n04487081
n04487394
n04493381
n04501370
n04505470
n04507155
n04509417
n04515003
n04517823
n04522168
n04523525
n04525038
n04525305
n04532106
n04532670
n04536866
n04540053
n04542943
n04548280
n04548362
n04550184
n04552348
n04553703
n04554684
n04557648
n04560804
n04562935
n04579145
n04579432
n04584207
n04589890
n04590129
n04591157
n04591713
n04592741
n04596742
n04597913
n04599235
n04604644
n04606251
n04612504
n04613696
n06359193
n06596364
n06785654
n06794110
n06874185
n07248320
n07565083
n07579787
n07583066
n07584110
n07590611
n07613480
n07614500
n07615774
n07684084
n07693725
n07695742
n07697313
n07697537
n07711569
n07714571
n07714990
n07715103
n07716358
n07716906
n07717410
n07717556
n07718472
n07718747
n07720875
n07730033
n07734744
n07742313
n07745940
n07747607
n07749582
n07753113
n07753275
n07753592
n07754684
n07760859
n07768694
n07802026
n07831146
n07836838
n07860988
n07871810
n07873807
n07875152
n07880968
n07892512
n07920052
n07930864
n07932039
n09193705
n09229709
n09246464
n09256479
n09288635
n09332890
n09399592
n09421951
n09428293
n09468604
n09472597
n09835506
n10148035
n10565667
n11879895
n11939491
n12057211
n12144580
n12267677
n12620546
n12768682
n12985857
n12998815
n13037406
n13040303
n13044778
n13052670
n13054560
n13133613
n15075141
|
PyTorch/LanguageModeling | LanguageModeling | README | # Language Modeling
Language modeling (LM) is a natural language processing (NLP) task that determines the probability of a given sequence of words occurring in a sentence.
In an era where computers, smartphones and other electronic devices increasingly need to interact with humans, language modeling has become an indispensable technique for teaching devices how to communicate in natural languages in human-like ways.
But how does language modeling work? And what can you build with it? What are the different approaches, what are its potential benefits and limitations, and how might you use it in your business?
In this guide, you’ll find answers to all of those questions and more. Whether you’re an experienced machine learning engineer considering implementation, a developer wanting to learn more, or a product manager looking to explore what’s possible with natural language processing and language modeling, this guide is for you.
Here’s a look at what we’ll cover:
- Language modeling – the basics
- How does language modeling work?
- Use cases and applications
- Getting started
## Language modeling – the basics
### What is language modeling?
"*Language modeling is the task of assigning a probability to sentences in a language. […]
Besides assigning a probability to each sequence of words, the language models also assign a
probability for the likelihood of a given word (or a sequence of words) to follow a sequence
of words.*" Source: Page 105, [Neural Network Methods in Natural Language Processing](http://amzn.to/2wt1nzv), 2017.
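In probabilistic terms, this means a language model factorizes the probability of a word sequence with the chain rule, so modeling a sentence reduces to modeling next-word probabilities (a standard formulation, not specific to this repository):
```
P(w_1, ..., w_n) = \prod_{i=1}^{n} P(w_i | w_1, ..., w_{i-1})
```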
### Types of language models
There are primarily two types of Language Models:
- Statistical Language Models: These models use traditional statistical techniques like N-grams, Hidden Markov Models (HMM), and certain linguistic rules to learn the probability distribution of words.
- Neural Language Models: They use different kinds of Neural Networks to model language, and have surpassed the statistical language models in their effectiveness.
"*We provide ample empirical evidence to suggest that connectionist language models are
superior to standard n-gram techniques, except their high computational (training)
complexity.*" Source: [Recurrent neural network based language model](http://www.fit.vutbr.cz/research/groups/speech/publi/2010/mikolov_interspeech2010_IS100722.pdf), 2010.
Given the superior performance of neural language models, we include in the container two popular state-of-the-art neural language models: BERT and Transformer-XL.
### Why is language modeling important?
Language modeling is fundamental in modern NLP applications. It enables machines to understand qualitative information, and enables people to communicate with machines in the natural languages that humans use to communicate with each other.
Language modeling is used directly in a variety of industries, including tech, finance, healthcare, transportation, legal, military, and government. In fact, you have probably already interacted with a language model today, whether through Google search, a voice assistant, or text autocomplete features.
## How does language modeling work?
The roots of modern language modeling can be traced back to 1948, when Claude Shannon
published a paper titled "A Mathematical Theory of Communication", laying the foundation for information theory and language modeling. In the paper, Shannon detailed the use of a stochastic model called the Markov chain to create a statistical model for the sequences of letters in English text. The Markov models, along with n-gram, are still among the most popular statistical language models today.
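To make the n-gram idea concrete, below is a minimal from-scratch sketch of a bigram model with add-alpha smoothing (illustrative only; the toy corpus and all values are made up, and this code is not part of the repository):
```python
from collections import Counter, defaultdict

# Toy corpus; real language models are estimated from billions of tokens.
corpus = [
    "i am driving to work",
    "i am driving home",
    "we are driving home",
]

unigrams, bigrams = Counter(), defaultdict(Counter)
for sentence in corpus:
    tokens = ["<s>"] + sentence.split() + ["</s>"]
    unigrams.update(tokens)
    for prev, word in zip(tokens, tokens[1:]):
        bigrams[prev][word] += 1

def bigram_prob(prev, word, alpha=1.0):
    # Add-alpha smoothing keeps unseen word pairs from getting zero probability.
    return (bigrams[prev][word] + alpha) / (unigrams[prev] + alpha * len(unigrams))

def sentence_prob(sentence):
    tokens = ["<s>"] + sentence.split() + ["</s>"]
    prob = 1.0
    for prev, word in zip(tokens, tokens[1:]):
        prob *= bigram_prob(prev, word)
    return prob

print(sentence_prob("i am driving home"))  # relatively high
print(sentence_prob("home driving am i"))  # orders of magnitude lower
```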
However, simple statistical language models have serious drawbacks in scalability and fluency because of their sparse representation of language. By representing language units (e.g., words or characters) as non-linear, distributed combinations of weights in a continuous space, neural language models overcome this problem and can generalize to rare or unknown words instead of being misled by them.
Therefore, as mentioned above, we introduce two popular state-of-the-art neural language models, BERT and Transformer-XL, in TensorFlow and PyTorch. More details can be found in the [NVIDIA Deep Learning Examples GitHub repository](https://github.com/NVIDIA/DeepLearningExamples).
## Use cases and applications
### Speech Recognition
Imagine speaking a phrase to the phone, expecting it to convert the speech to text. How does
it know if you said "recognize speech" or "wreck a nice beach"? Language models help figure it out
based on the context, enabling machines to process and make sense of speech audio.
### Spelling Correction
Language-model-enabled spellcheckers can point out spelling errors and suggest possible alternatives.
### Machine translation
Imagine you are translating the Chinese sentence "我在开车" into English. Your translation system gives you several choices:
- I at open car
- me at open car
- I at drive
- me at drive
- I am driving
- me am driving
A language model tells you which translation sounds the most natural.
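As a toy illustration, the `sentence_prob` bigram scorer sketched in the "How does language modeling work?" section above already prefers the fluent candidate; a production system would use a much stronger neural language model:
```python
# Reuses the toy sentence_prob() from the bigram sketch above (illustrative only).
candidates = ["i at open car", "me at open car", "i am driving", "me am driving"]
# The candidate with the highest language-model probability reads most naturally.
print(max(candidates, key=sentence_prob))  # -> "i am driving"
```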
## Getting started
NVIDIA provides examples for Language Modeling in the [Deep Learning Examples GitHub Repository](https://github.com/NVIDIA/DeepLearningExamples). These examples provide you with easy-to-consume, highly optimized scripts for both training and inference. The quick start guide in our GitHub repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference for your application/use case.
These models are tested and maintained by NVIDIA, leveraging mixed precision using tensor cores on our latest GPUs for faster training times while maintaining accuracy.
|
Kaldi/SpeechRecognition/scripts | scripts | compute_wer | #!/bin/bash
model_path="/data/models/LibriSpeech"
librispeech_path="/data/datasets/LibriSpeech/test_clean"
result_path="/data/results"
# Correctness
cat $model_path/words.txt | tr '[:upper:]' '[:lower:]' > $result_path/words.txt
cat $librispeech_path/text | tr '[:upper:]' '[:lower:]' > $result_path/text
oovtok=$(cat $result_path/words.txt | grep "<unk>" | awk '{print $2}')
/opt/kaldi/egs/wsj/s5/utils/sym2int.pl --map-oov $oovtok -f 2- $result_path/words.txt $result_path/text > $result_path/text_ints 2> /dev/null
# convert lattice to transcript
/opt/kaldi/src/latbin/lattice-best-path \
"ark:gunzip -c $result_path/lat.cuda-asr.gz |"\
"ark,t:$result_path/trans.cuda-asr" 2> /dev/null
# calculate wer
/opt/kaldi/src/bin/compute-wer --mode=present \
"ark:$result_path/text_ints" \
"ark:$result_path/trans.cuda-asr" 2> /dev/null
|
PyTorch/SpeechSynthesis/HiFiGAN | HiFiGAN | train | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import os
from functools import partial
from itertools import islice
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda import amp
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from apex.optimizers import FusedAdam, FusedLAMB
import models
from common import tb_dllogger as logger, utils, gpu_affinity
from common.utils import (Checkpointer, freeze, init_distributed, print_once,
reduce_tensor, unfreeze, l2_promote)
from hifigan.data_function import get_data_loader, mel_spectrogram
from hifigan.logging import init_logger, Metrics
from hifigan.models import (MultiPeriodDiscriminator, MultiScaleDiscriminator,
feature_loss, generator_loss, discriminator_loss)
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('--log_file', type=str, default=None,
help='Path to a DLLogger log file')
train = parser.add_argument_group('training setup')
train.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
train.add_argument('--epochs_this_job', type=int, default=None,
help='Number of epochs in partial training run')
train.add_argument('--keep_milestones', type=int, nargs='+',
default=[1000, 2000, 3000, 4000, 5000, 6000],
help='Milestone checkpoints to keep from removing')
train.add_argument('--checkpoint_interval', type=int, default=50,
help='Saving checkpoints frequency (in epochs)')
train.add_argument('--step_logs_interval', default=1, type=int,
help='Step logs dumping frequency (in steps)')
train.add_argument('--validation_interval', default=10, type=int,
help='Validation frequency (in epochs)')
train.add_argument('--samples_interval', default=100, type=int,
help='Dumping audio samples frequency (in epochs)')
train.add_argument('--resume', action='store_true',
help='Resume training from the last checkpoint')
train.add_argument('--checkpoint_path_gen', type=str, default=None,
help='Resume training from a selected checkpoint')
train.add_argument('--checkpoint_path_discrim', type=str, default=None,
help='Resume training from a selected checkpoint')
train.add_argument('--seed', type=int, default=1234,
help='Seed for PyTorch random number generators')
train.add_argument('--amp', action='store_true',
help='Enable AMP')
train.add_argument('--autocast_spectrogram', action='store_true',
help='Enable autocast while computing spectrograms')
train.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
train.add_argument('--disable_cudnn_benchmark', action='store_true',
help='Disable cudnn benchmark mode')
train.add_argument('--ema_decay', type=float, default=0,
help='Discounting factor for training weights EMA')
train.add_argument('--grad_accumulation', type=int, default=1,
help='Training steps to accumulate gradients for')
train.add_argument('--num_workers', type=int, default=1,
help='Data loader workers number')
train.add_argument('--fine_tuning', action='store_true',
help='Enable fine-tuning')
train.add_argument('--input_mels_dir', type=str, default=None,
help='Directory with mels for fine-tuning')
train.add_argument('--benchmark_epochs_num', type=int, default=5)
train.add_argument('--no_amp_grouped_conv', action='store_true',
help='Disable AMP on certain convs for better perf')
opt = parser.add_argument_group('optimization setup')
opt.add_argument('--optimizer', type=str, default='adamw',
help='Optimization algorithm')
opt.add_argument('--lr_decay', type=float, default=0.9998,
help='Learning rate decay')
opt.add_argument('-lr', '--learning_rate', type=float, required=True,
help='Learning rate')
opt.add_argument('--fine_tune_lr_factor', type=float, default=1.,
help='Learning rate multiplier for fine-tuning')
opt.add_argument('--adam_betas', type=float, nargs=2, default=(0.8, 0.99),
help='Adam Beta coefficients')
opt.add_argument('--grad_clip_thresh', default=1000.0, type=float,
help='Clip threshold for gradients')
opt.add_argument('-bs', '--batch_size', type=int, required=True,
help=('Batch size per training iter. '
'May be split into grad accumulation steps.'))
opt.add_argument('--warmup_steps', type=int, default=1000,
help='Number of steps for lr warmup')
data = parser.add_argument_group('dataset parameters')
data.add_argument('-d', '--dataset_path', default='data/LJSpeech-1.1',
help='Path to dataset', type=str)
data.add_argument('--training_files', type=str, required=True, nargs='+',
help='Paths to training filelists.')
data.add_argument('--validation_files', type=str, required=True, nargs='+',
help='Paths to validation filelists.')
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max_wav_value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling_rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter_length', default=1024, type=int,
help='Filter length')
audio.add_argument('--num_mels', default=80, type=int,
help='number of Mel bands')
audio.add_argument('--hop_length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win_length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel_fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel_fmax', default=8000.0, type=float,
help='Maximum mel frequency')
audio.add_argument('--mel_fmax_loss', default=None, type=float,
help='Maximum mel frequency used for computing loss')
audio.add_argument('--segment_size', default=8192, type=int,
help='Training segment size')
dist = parser.add_argument_group('distributed setup')
dist.add_argument(
'--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc. Do not set manually.')
dist.add_argument(
'--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc. Do not set manually.')
dist.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
return parser
def validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics):
gen.eval()
val_metrics.start_val()
with torch.no_grad():
for i, batch in enumerate(val_loader):
x, y, _, y_mel = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
y_mel = y_mel.cuda(non_blocking=True)
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
y_g_hat_mel = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax_loss)
with autocast(enabled=args.amp):
# val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item() * 45
# NOTE: Scale by 45.0 to match train loss magnitude
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
val_metrics['loss_discrim'] = reduce_tensor(
loss_disc_s + loss_disc_f, args.world_size)
val_metrics['loss_gen'] = reduce_tensor(loss_gen_all,
args.world_size)
val_metrics['loss_mel'] = reduce_tensor(loss_mel, args.world_size)
val_metrics['frames'] = x.size(0) * x.size(1) * args.world_size
val_metrics.accumulate(scopes=['val'])
val_metrics.finish_val()
gen.train()
def main():
parser = argparse.ArgumentParser(description='PyTorch HiFi-GAN Training',
allow_abbrev=False)
parser = models.parse_model_args('HiFi-GAN', parse_args(parser))
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
print(nproc_per_node)
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
# seeds, distributed init, logging, cuDNN
distributed_run = args.world_size > 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
metrics = Metrics(scopes=['train', 'train_avg'],
benchmark_epochs=args.benchmark_epochs_num,
cuda=args.cuda)
val_metrics = Metrics(scopes=['val'], cuda=args.cuda)
init_logger(args.output, args.log_file, args.ema_decay)
logger.parameters(vars(args), tb_subset='train')
l2_promote()
torch.backends.cudnn.benchmark = not args.disable_cudnn_benchmark
train_setup = models.get_model_train_setup('HiFi-GAN', args)
gen_config = models.get_model_config('HiFi-GAN', args)
gen = models.get_model('HiFi-GAN', gen_config, 'cuda')
mpd = MultiPeriodDiscriminator(periods=args.mpd_periods,
concat_fwd=args.concat_fwd).cuda()
assert args.amp or not args.no_amp_grouped_conv, \
"--no-amp-grouped-conv is applicable only when AMP is enabled"
msd = MultiScaleDiscriminator(concat_fwd=args.concat_fwd,
no_amp_grouped_conv=args.no_amp_grouped_conv)
msd = msd.cuda()
mel_spec = partial(mel_spectrogram, n_fft=args.filter_length,
num_mels=args.num_mels,
sampling_rate=args.sampling_rate,
hop_size=args.hop_length, win_size=args.win_length,
fmin=args.mel_fmin)
kw = {'lr': args.learning_rate, 'betas': args.adam_betas}
proto = {'adam': FusedAdam, 'lamb': FusedLAMB, 'adamw': AdamW
}[args.optimizer]
optim_g = proto(gen.parameters(), **kw)
optim_d = proto(itertools.chain(msd.parameters(), mpd.parameters()), **kw)
scaler_g = amp.GradScaler(enabled=args.amp)
scaler_d = amp.GradScaler(enabled=args.amp)
# setup EMA
if args.ema_decay > 0:
        # buried import, requires apex
from common.ema_utils import (apply_multi_tensor_ema,
init_multi_tensor_ema)
gen_ema = models.get_model('HiFi-GAN', gen_config, 'cuda').cuda()
mpd_ema = MultiPeriodDiscriminator(
periods=args.mpd_periods,
concat_fwd=args.concat_fwd).cuda()
msd_ema = MultiScaleDiscriminator(
concat_fwd=args.concat_fwd,
no_amp_grouped_conv=args.no_amp_grouped_conv).cuda()
else:
gen_ema, mpd_ema, msd_ema = None, None, None
# setup DDP
if distributed_run:
kw = {'device_ids': [args.local_rank],
'output_device': args.local_rank}
gen = DDP(gen, **kw)
msd = DDP(msd, **kw)
# DDP needs nonempty model
mpd = DDP(mpd, **kw) if len(args.mpd_periods) else mpd
# resume from last / load a checkpoint
train_state = {}
checkpointer = Checkpointer(args.output, args.keep_milestones)
checkpointer.maybe_load(
gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d, train_state, args,
gen_ema=None, mpd_ema=None, msd_ema=None)
iters_all = train_state.get('iters_all', 0)
last_epoch = train_state['epoch'] + 1 if 'epoch' in train_state else -1
sched_g = ExponentialLR(optim_g, gamma=args.lr_decay, last_epoch=last_epoch)
sched_d = ExponentialLR(optim_d, gamma=args.lr_decay, last_epoch=last_epoch)
if args.fine_tuning:
print_once('Doing fine-tuning')
train_loader = get_data_loader(args, distributed_run, train=True)
val_loader = get_data_loader(args, distributed_run, train=False,
val_kwargs=dict(repeat=5, split=True))
val_samples_loader = get_data_loader(args, False, train=False,
val_kwargs=dict(split=False),
batch_size=1)
if args.ema_decay > 0.0:
gen_ema_params = init_multi_tensor_ema(gen, gen_ema)
mpd_ema_params = init_multi_tensor_ema(mpd, mpd_ema)
msd_ema_params = init_multi_tensor_ema(msd, msd_ema)
epochs_done = 0
for epoch in range(max(1, last_epoch), args.epochs + 1):
metrics.start_epoch(epoch)
if distributed_run:
train_loader.sampler.set_epoch(epoch)
gen.train()
mpd.train()
msd.train()
iter_ = 0
iters_num = len(train_loader) // args.grad_accumulation
for step, batch in enumerate(train_loader):
if step // args.grad_accumulation >= iters_num:
break # only full effective batches
is_first_accum_step = step % args.grad_accumulation == 0
is_last_accum_step = (step + 1) % args.grad_accumulation == 0
assert (args.grad_accumulation > 1
or (is_first_accum_step and is_last_accum_step))
if is_first_accum_step:
iter_ += 1
iters_all += 1
metrics.start_iter(iter_)
accum_batches = []
optim_d.zero_grad(set_to_none=True)
optim_g.zero_grad(set_to_none=True)
x, y, _, y_mel = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
y_mel = y_mel.cuda(non_blocking=True)
accum_batches.append((x, y, y_mel))
with torch.set_grad_enabled(is_last_accum_step), \
autocast(enabled=args.amp):
y_g_hat = gen(x)
unfreeze(mpd)
unfreeze(msd)
with autocast(enabled=args.amp):
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
metrics['loss_discrim'] = reduce_tensor(loss_disc_all, args.world_size)
metrics['frames'] = x.size(0) * x.size(1) * args.world_size
metrics.accumulate()
loss_disc_all /= args.grad_accumulation
scaler_d.scale(loss_disc_all).backward()
if not is_last_accum_step:
continue
scaler_d.step(optim_d)
scaler_d.update()
# generator
freeze(mpd)
freeze(msd)
for _i, (x, y, y_mel) in enumerate(reversed(accum_batches)):
if _i != 0: # first `y_g_hat` can be reused
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
y_g_hat_mel = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax_loss)
# L1 mel-spectrogram Loss
with autocast(enabled=args.amp):
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
metrics['loss_gen'] = reduce_tensor(loss_gen_all, args.world_size)
metrics['loss_mel'] = reduce_tensor(loss_mel, args.world_size)
metrics.accumulate()
loss_gen_all /= args.grad_accumulation
scaler_g.scale(loss_gen_all).backward()
scaler_g.step(optim_g)
scaler_g.update()
metrics['lrate_gen'] = optim_g.param_groups[0]['lr']
metrics['lrate_discrim'] = optim_d.param_groups[0]['lr']
metrics.accumulate()
if args.ema_decay > 0.0:
apply_multi_tensor_ema(args.ema_decay, *gen_ema_params)
apply_multi_tensor_ema(args.ema_decay, *mpd_ema_params)
apply_multi_tensor_ema(args.ema_decay, *msd_ema_params)
metrics.finish_iter() # done accumulating
if iters_all % args.step_logs_interval == 0:
logger.log((epoch, iter_, iters_num), metrics, scope='train',
tb_iter=iters_all, flush_log=True)
assert is_last_accum_step
metrics.finish_epoch()
logger.log((epoch,), metrics, scope='train_avg', flush_log=True)
if epoch % args.validation_interval == 0:
validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics)
logger.log((epoch,), val_metrics, scope='val', tb_iter=iters_all,
flush_log=True)
# validation samples
if epoch % args.samples_interval == 0 and args.local_rank == 0:
gen.eval()
with torch.no_grad():
for i, batch in enumerate(islice(val_samples_loader, 5)):
x, y, _, _ = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
# args.fmax instead of args.max_for_inference
y_hat_spec = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax)
logger.log_samples_tb(iters_all, i, y_g_hat, y_hat_spec,
args.sampling_rate)
if epoch == args.samples_interval: # ground truth
logger.log_samples_tb(0, i, y, x, args.sampling_rate)
gen.train()
train_state.update({'epoch': epoch, 'iters_all': iters_all})
# save before making sched.step() for proper loading of LR
checkpointer.maybe_save(
gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d, epoch,
train_state, args, gen_config, train_setup,
gen_ema=gen_ema, mpd_ema=mpd_ema, msd_ema=msd_ema)
logger.flush()
sched_g.step()
sched_d.step()
epochs_done += 1
if (args.epochs_this_job is not None
and epochs_done == args.epochs_this_job):
break
# finished training
if epochs_done > 0:
logger.log((), metrics, scope='train_benchmark', flush_log=True)
if epoch % args.validation_interval != 0: # val metrics are not up-to-date
validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics)
logger.log((), val_metrics, scope='val', flush_log=True)
else:
print_once(f'Finished without training after epoch {args.epochs}.')
if __name__ == '__main__':
main()
|
PyTorch/LanguageModeling/BERT/triton/dist4l/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | checkpoints:
- name: dist-4l-qa
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/bert_pyt_ckpt_distilled_4l_288d_qa_squad11_amp/versions/21.11.0/zip
configurations:
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
- 8
- 16
batch_sizes: 1 8 16
capture_cuda_graph: 0
checkpoint_variant: dist-4l-qa
export_format: ts-trace
export_precision: fp16
format: ts-trace
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
container_version: '21.10'
datasets:
- name: data
datasets_dir: datasets
framework: PyTorch
model_name: BERT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/AMP | AMP | DGXA100_resnet50_AMP_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGXA100 /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
TensorFlow/Detection/SSD/models/research/object_detection/matchers | matchers | argmax_matcher_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow as tf
from object_detection.matchers import argmax_matcher
from object_detection.utils import test_case
class ArgMaxMatcherTest(test_case.TestCase):
def test_return_correct_matches_with_default_thresholds(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1., 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_rows = np.array([2, 0, 1, 0, 1])
(res_matched_cols, res_unmatched_cols,
res_match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(res_match_results[res_matched_cols],
expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4])
self.assertFalse(np.all(res_unmatched_cols))
def test_return_correct_matches_with_empty_rows(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
return match.unmatched_column_indicator()
similarity = 0.2 * np.ones([0, 5], dtype=np.float32)
res_unmatched_cols = self.execute(graph_fn, [similarity])
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5))
def test_return_correct_matches_with_matched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1, 2])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(
matched_threshold=3.,
unmatched_threshold=2.,
negatives_lower_than_unmatched=False)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_not_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3])
expected_matched_rows = np.array([2, 0])
expected_unmatched_cols = np.array([1, 2, 4])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_while_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([2, 1, 0])
expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_using_force_match_padded_groundtruth(self):
def graph_fn(similarity, valid_rows):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity, valid_rows)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[0, 0, 0, 0, 0],
[3, 0, -1, 2, 0],
[0, 0, 0, 0, 0]], dtype=np.float32)
valid_rows = np.array([True, True, False, True, False])
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([3, 1, 0])
expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity, valid_rows])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_valid_arguments_corner_case(self):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1)
def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1,
negatives_lower_than_unmatched=False)
def test_invalid_arguments_no_matched_threshold(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=None,
unmatched_threshold=4)
def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=2)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Classification/ConvNets/se-resnext101-32x4d/training/AMP | AMP | DGXA100_se-resnext101-32x4d_AMP_250E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision AMP --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
|
PyTorch/LanguageModeling/BERT/triton | triton | prepare_input_data | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
load_from_file,
)
LOGGER = logging.getLogger("prepare_input_data")
def _parse_and_validate_args():
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--input-data-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
data = []
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Creating benchmark data")
for _, x, _ in tqdm(dataloader_fn(), unit="batch", mininterval=10):
for input__0, input__1, input__2 in zip(x["input__0"], x["input__1"], x["input__2"]):
data.append(
{
"input__0": input__0.tolist(),
"input__1": input__1.tolist(),
"input__2": input__2.tolist(),
}
)
LOGGER.info("Dumping data")
with open(Path(args.input_data_dir) / "data.json", "w") as fd:
fd.write(json.dumps({"data": data}))
LOGGER.info("Dumped")
if __name__ == "__main__":
main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils | utils | metric_logger | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
)
return self.delimiter.join(loss_str)
def get_dict(self):
loss_dict = {}
for name, meter in self.meters.items():
loss_dict[name] = "{:.4f} ({:.4f})".format(meter.median, meter.global_avg)
return loss_dict
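# Minimal usage sketch (values are illustrative):
#   meters = MetricLogger(delimiter="  ")
#   meters.update(loss=0.7, loss_box_reg=0.2)   # tensors are converted via .item()
#   print(str(meters))      # e.g. "loss: 0.7000 (0.7000)  loss_box_reg: 0.2000 (0.2000)"
#   print(meters.loss.avg)  # windowed average exposed through __getattr__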
|
TensorFlow2/Classification/ConvNets/config/efficientnet_v1 | efficientnet_v1 | b0_cfg | import tensorflow as tf
from config.defaults import Config
# NOTE: this confile file can further be overridden by user-defined params provided at the command line
config = dict(
path_to_impl='model.efficientnet_model_v1',
#data-related model params
num_classes=1000, # must be the same as data.num_classes
input_channels= 3,
rescale_input= 1, # binary,
mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255), # used when rescale_input=True
std_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255), # used when rescale_input=True
    dtype= tf.float32, # used for input image normalization/casting; one of tf.float32, tf.float16, tf.bfloat16
    # GUIDE
    # name                 width  depth  resolution  dropout
    # efficientnet_v1-b0    1.0    1.0      224         0.2
    # efficientnet_v1-b1    1.0    1.1      240         0.2
    # efficientnet_v1-b2    1.1    1.2      260         0.3
    # efficientnet_v1-b3    1.2    1.4      300         0.3
    # efficientnet_v1-b4    1.4    1.8      380         0.4
    # efficientnet_v1-b5    1.6    2.2      456         0.4
    # efficientnet_v1-b6    1.8    2.6      528         0.5
    # efficientnet_v1-b7    2.0    3.1      600         0.5
    # efficientnet_v1-b8    2.2    3.6      672         0.5
    # efficientnet_v1-l2    4.3    5.3      800         0.5
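    # For example, to adapt this file to efficientnet_v1-b1 one would (per the table above) keep
    # width_coefficient=1.0, set depth_coefficient=1.1, keep dropout_rate=0.2, and use 240x240
    # images in the train/eval configs (image resolution is set there, not here).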
width_coefficient= 1.0,
depth_coefficient= 1.0,
dropout_rate= 0.2,
# image resolution must be set in tr/eval/predict configs below
drop_connect_rate= 0.2,
stem_base_filters= 32,
top_base_filters= 1280,
activation= 'swish',
depth_divisor= 8,
min_depth= None,
use_se= 1, # binary
batch_norm= 'syncbn',
bn_momentum= 0.99,
bn_epsilon= 1e-3,
weight_init= 'fan_out',
blocks= (
# (input_filters, output_filters, kernel_size, num_repeat,expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
dict(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
# pylint: enable=bad-whitespace
),
)
# train_config = dict(lr_decay='cosine',
#
# max_epochs=500,
# img_size=224,
# batch_size=256,
# save_checkpoint_freq=5,
# lr_init=0.005,
# weight_decay=5e-6,
# epsilon=0.001,
# resume_checkpoint=1,
# enable_tensorboard=0
# )
#
# eval_config = dict(img_size=224,
# batch_size=256)
#
# data_config = dict(
# data_dir='/data/',
# augmenter_name='autoaugment',
# mixup_alpha=0.0,
#
#
# )
# runtime_config = dict(mode='train_and_eval',
# model_dir='./output/',
# use_amp=1,
# use_xla=1,
# log_steps=100
# )
#
# config = dict(model=model_config,
# train=train_config,
# eval=eval_config,
# data=data_config,
# runtime=runtime_config,
# )
|
TensorFlow/LanguageModeling/BERT/biobert/scripts | scripts | biobert_finetune_train_benchmark | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
task=${1:-"ner_bc5cdr-chem"}
num_gpu=${2:-"2"}
bert_model=${3:-"base"}
cased=${4:-"false"}
epochs=2.0
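# Example invocation (illustrative): benchmark NER training on BC5CDR-chem with 4 GPUs,
# BERT-base, uncased vocabulary:
#   bash biobert_finetune_train_benchmark.sh ner_bc5cdr-chem 4 base false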
if [ "$cased" = "true" ] ; then
DO_LOWER_CASE=0
CASING_DIR_PREFIX="cased"
case_flag="--do_lower_case=False"
else
DO_LOWER_CASE=1
CASING_DIR_PREFIX="uncased"
case_flag="--do_lower_case=True"
fi
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-24_H-1024_A-16
else
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-12_H-768_A-12
fi
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu -H localhost:$num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
use_hvd="--horovod"
else
mpi_command=""
use_hvd=""
fi
DATESTAMP=`date +'%y%m%d%H%M%S'`
printf -v TAG "tf_bert_biobert_%s_training_benchmark_%s_%s_num_gpu_%d" "$task" "$bert_model" "$CASING_DIR_PREFIX" "$num_gpu"
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
mkdir -p ${OUTPUT_DIR}
if [ "$task" = "ner_bc5cdr-chem" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/chem
LOGFILE="${OUTPUT_DIR}/${task}_training_benchmark_bert_${bert_model}_gpu_${num_gpu}.log"
echo "Training performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for train_batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
      res_dir=${OUTPUT_DIR}/bert_${bert_model}_gpu_${num_gpu}_sl_${seq_length}_prec_${use_fp16}_bs_${train_batch_size}
mkdir -p ${res_dir}
tmp_file="${res_dir}/${task}_training_benchmark.log"
$mpi_command python /workspace/bert/run_ner.py \
--do_prepare=true \
--do_train=true \
--do_eval=true \
--do_predict=true \
--task_name=bc5cdr \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--num_train_epochs=$epochs \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--train_batch_size=$train_batch_size \
--max_seq_length=$seq_length \
$use_hvd $use_fp16 --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | head -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "${use_fp16} $seq_length $train_batch_size $perf" >> $LOGFILE
done
done
done
elif [ "$task" = "ner_bc5cdr-disease" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/disease
LOGFILE="${OUTPUT_DIR}/${task}_training_benchmark_bert_${bert_model}_gpu_${num_gpu}.log"
echo "Training performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for train_batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
      res_dir=${OUTPUT_DIR}/bert_${bert_model}_gpu_${num_gpu}_sl_${seq_length}_prec_${use_fp16}_bs_${train_batch_size}
mkdir -p ${res_dir}
tmp_file="${res_dir}/${task}_training_benchmark.log"
$mpi_command python3 /workspace/bert/run_ner.py \
--do_prepare=true \
--do_train=true \
--do_eval=true \
--do_predict=true \
--task_name="bc5cdr" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--num_train_epochs=$epochs \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--train_batch_size=$train_batch_size \
--max_seq_length=$seq_length \
"$use_hvd" "$use_fp16" --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | head -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "${use_fp16} $seq_length $train_batch_size $perf" >> $LOGFILE
done
done
done
elif [ "$task" = "rel_chemprot" ] ; then
DATASET_DIR=/workspace/bert/data/biobert/chemprot-data_treeLSTM
LOGFILE="${OUTPUT_DIR}/${task}_training_benchmark_bert_${bert_model}_gpu_${num_gpu}.log"
echo "Training performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE
echo "Precision Sequence Length Batch size Performance(sent/sec)" >> $LOGFILE
for seq_length in 128 512; do
for train_batch_size in 8 32 64; do
for use_fp16 in "--amp" "--noamp"; do
      res_dir=${OUTPUT_DIR}/bert_${bert_model}_gpu_${num_gpu}_sl_${seq_length}_prec_${use_fp16}_bs_${train_batch_size}
mkdir -p ${res_dir}
tmp_file="${res_dir}/${task}_training_benchmark.log"
$mpi_command python3 /workspace/bert/run_re.py \
--do_prepare=true \
--do_train=true \
--do_eval=true \
--do_predict=true \
--task_name="chemprot" \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint="$BERT_DIR/bert_model.ckpt" \
--num_train_epochs=$epochs \
--data_dir=$DATASET_DIR \
--output_dir=$res_dir \
--train_batch_size=$train_batch_size \
--max_seq_length=$seq_length \
"$use_hvd" "$use_fp16" --use_xla $case_flag |& tee $tmp_file
perf=`cat $tmp_file | grep -F 'Throughput Average (sentences/sec) =' | head -1 | awk -F'= ' '{print $2}' | awk -F' sen' '{print $1}'`
echo "${use_fp16} $seq_length $train_batch_size $perf" >> $LOGFILE
done
done
done
else
echo "Benchmarking for " $task "currently not supported. Sorry!"
fi |
TensorFlow/Segmentation/VNet | VNet | main | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import pickle
import shutil
import horovod.tensorflow as hvd
import tensorflow as tf
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from hooks.profiling_hook import ProfilingHook
from hooks.train_hook import TrainHook
from utils.cmd_util import PARSER
from utils.data_loader import MSDDataset
from utils.model_fn import vnet_v2
def main(_):
tf.get_logger().setLevel(logging.ERROR)
hvd.init()
FLAGS = PARSER.parse_args()
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.DEFAULT)]
if FLAGS.log_dir:
backends += [JSONStreamBackend(Verbosity.DEFAULT, FLAGS.log_dir)]
DLLogger.init(backends=backends)
for key in vars(FLAGS):
DLLogger.log(step="PARAMETER", data={str(key): vars(FLAGS)[key]})
os.environ['CUDA_CACHE_DISABLE'] = '0'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_DISABLE_NVTX_RANGES'] = '1'
dataset = MSDDataset(json_path=os.path.join(FLAGS.data_dir, 'dataset.json'),
dst_size=FLAGS.input_shape,
seed=FLAGS.seed,
interpolator=FLAGS.resize_interpolator,
data_normalization=FLAGS.data_normalization,
batch_size=FLAGS.batch_size,
train_split=FLAGS.train_split,
split_seed=FLAGS.split_seed)
FLAGS.labels = dataset.labels
gpu_options = tf.GPUOptions()
config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
if FLAGS.use_amp:
config.graph_options.rewrite_options.auto_mixed_precision = 1
run_config = tf.estimator.RunConfig(
save_summary_steps=None,
save_checkpoints_steps=None if FLAGS.benchmark else dataset.train_steps * FLAGS.train_epochs,
save_checkpoints_secs=None,
tf_random_seed=None,
session_config=config,
keep_checkpoint_max=1)
estimator = tf.estimator.Estimator(
model_fn=vnet_v2,
model_dir=FLAGS.model_dir if hvd.rank() == 0 else None,
config=run_config,
params=FLAGS)
train_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if 'train' in FLAGS.exec_mode:
steps = dataset.train_steps * FLAGS.train_epochs
if FLAGS.benchmark:
steps = FLAGS.warmup_steps * 2
if hvd.rank() == 0:
train_hooks += [ProfilingHook(FLAGS.warmup_steps, FLAGS.batch_size * hvd.size(), DLLogger)]
else:
if hvd.rank() == 0:
train_hooks += [TrainHook(FLAGS.log_every, DLLogger)]
estimator.train(
input_fn=lambda: dataset.train_fn(FLAGS.augment),
steps=steps,
hooks=train_hooks)
if 'evaluate' in FLAGS.exec_mode:
if hvd.rank() == 0:
if FLAGS.train_split >= 1.0:
raise ValueError("Missing argument: --train_split < 1.0")
result = estimator.evaluate(
input_fn=dataset.eval_fn,
steps=dataset.eval_steps,
hooks=[])
DLLogger.log(step=tuple(), data={'background_dice': str(result['background dice']),
'anterior_dice': str(result['Anterior dice']),
'posterior_dice': str(result['Posterior dice'])})
if 'predict' in FLAGS.exec_mode:
count = 1
hooks = []
if hvd.rank() == 0:
if FLAGS.benchmark:
count = math.ceil((FLAGS.warmup_steps * 2) / dataset.test_steps)
hooks += [ProfilingHook(FLAGS.warmup_steps, FLAGS.batch_size * hvd.size(), DLLogger, training=False)]
predictions = estimator.predict(input_fn=lambda: dataset.test_fn(count=count),
hooks=hooks)
pred = [p['prediction'] for p in predictions]
predict_path = os.path.join(FLAGS.model_dir, 'predictions')
if os.path.exists(predict_path):
shutil.rmtree(predict_path)
os.makedirs(predict_path)
pickle.dump(pred, open(os.path.join(predict_path, 'predictions.pkl'), 'wb'))
if __name__ == '__main__':
tf.compat.v1.app.run()
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/configuration | configuration | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .configuration import SynGenDatasetFeatureSpec, SynGenConfiguration
|
PyTorch/SpeechRecognition/Jasper/configs | configs | jasper10x5dr_speedp-offline | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "Jasper"
labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
input_val:
audio_dataset: &val_dataset
sample_rate: &sample_rate 16000
trim_silence: true
normalize_transcripts: true
filterbank_features: &val_features
normalize: per_feature
sample_rate: *sample_rate
window_size: 0.02
window_stride: 0.01
window: hann
n_filt: &n_filt 64
n_fft: 512
frame_splicing: &frame_splicing 1
dither: 0.00001
pad_align: 16
# For training we keep samples < 16.7s and apply augmentation
input_train:
audio_dataset:
<<: *val_dataset
max_duration: 16.7
ignore_offline_speed_perturbation: false
filterbank_features:
<<: *val_features
max_duration: 16.7
spec_augment:
freq_masks: 0
max_freq: 20
time_masks: 0
max_time: 75
jasper:
encoder:
init: xavier_uniform
in_feats: *n_filt
frame_splicing: *frame_splicing
activation: relu
use_conv_masks: true
blocks:
- &Conv1
filters: 256
repeat: 1
kernel_size: [11]
stride: [2]
dilation: [1]
dropout: 0.2
residual: false
- &B1
filters: 256
repeat: 5
kernel_size: [11]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B1
- &B2
filters: 384
repeat: 5
kernel_size: [13]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B2
- &B3
filters: 512
repeat: 5
kernel_size: [17]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B3
- &B4
filters: 640
repeat: 5
kernel_size: [21]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B4
- &B5
filters: 768
repeat: 5
kernel_size: [25]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B5
- &Conv2
filters: 896
repeat: 1
kernel_size: [29]
stride: [1]
dilation: [2]
dropout: 0.4
residual: false
- &Conv3
filters: &enc_feats 1024
repeat: 1
kernel_size: [1]
stride: [1]
dilation: [1]
dropout: 0.4
residual: false
decoder:
in_feats: *enc_feats
init: xavier_uniform
|
TensorFlow/Segmentation/UNet_Medical | UNet_Medical | .gitignore | .idea/
.ipynb_checkpoints
/_python_build
*.pyc
__pycache__
*.swp
/results
*.zip
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/import_utils | import_utils | __init__ | #!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .waveglow import load_waveglow
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading | dataloading | split_tfrecords_multihot_dataset | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import tensorflow as tf
import os
import glob
import json
import numpy as np
import tqdm
def serialize_composite(rt):
components = tf.nest.flatten(rt, expand_composites=True)
tensor = tf.stack([tf.io.serialize_tensor(t) for t in components])
return tf.io.serialize_tensor(tensor)
def deserialize_composite(serialized, type_spec):
data = tf.io.parse_tensor(serialized, tf.string)
component_specs = tf.nest.flatten(type_spec, expand_composites=True)
components = [tf.io.parse_tensor(data[i], out_type=spec.dtype)
for i, spec in enumerate(component_specs)]
return tf.nest.pack_sequence_as(type_spec, components, expand_composites=True)
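# Round-trip sketch (illustrative; the type spec must match how the ragged tensor was built):
#   rt = tf.ragged.constant([[1, 2], [3]], dtype=tf.int32)
#   blob = serialize_composite(rt)
#   spec = tf.RaggedTensorSpec(dtype=tf.int32, shape=[2, None], ragged_rank=1, row_splits_dtype=tf.int64)
#   restored = deserialize_composite(blob, spec)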
def length_filename(dataset_dir):
return f'{dataset_dir}/length.json'
class PrebatchStreamWriter:
def __init__(self, dst_dir, dtype, feature_name='data', multihot=False, batches_per_file=1):
self.dst_dir = dst_dir
os.makedirs(dst_dir, exist_ok=True)
self.dtype = dtype
self.feature_name = feature_name
self.multihot = multihot
self.batches_per_file = batches_per_file
self.writer = None
self._file_idx = -1
self._batches_saved = 0
def _new_file(self):
if self.writer:
self.writer.close()
self._file_idx += 1
self.writer = tf.io.TFRecordWriter(os.path.join(self.dst_dir, f'data_{self._file_idx}.tfrecords'))
def save(self, prebatch):
if self._batches_saved % self.batches_per_file == 0:
self._new_file()
if self.multihot:
serialized = serialize_composite(tf.cast(prebatch, self.dtype)).numpy()
else:
if isinstance(prebatch, tf.RaggedTensor):
prebatch = prebatch.to_tensor()
serialized = tf.io.serialize_tensor(tf.cast(prebatch, dtype=self.dtype)).numpy()
features = tf.train.Features(feature={
self.feature_name: tf.train.Feature(bytes_list=tf.train.BytesList(value=[serialized]))
})
example = tf.train.Example(features=features)
self.writer.write(example.SerializeToString())
self._batches_saved += 1
def close(self):
self.writer.close()
def create_writer(dst_dir, dtype, feature_name='data', multihot=False,
format='tfrecords', num_features=1, batches_per_file=1):
if format == 'tfrecords':
writer = PrebatchStreamWriter(dst_dir=dst_dir, dtype=dtype, multihot=multihot, batches_per_file=batches_per_file)
metadata = dict(format=format, dtype=dtype.name, multihot=multihot,
feature_name=feature_name,num_features=num_features, batches_per_file=batches_per_file)
with open(os.path.join(dst_dir, 'format.json'), 'w') as f:
json.dump(metadata, f)
return writer
else:
raise ValueError(f'Unknown feature format: {format}')
def create_reader(src_dir, batch_size, world_size=1, rank=0, data_parallel=True):
with open(os.path.join(src_dir, 'format.json')) as f:
metadata = json.load(f)
if metadata['format'] == 'tfrecords':
reader = SingleFeatureTFRecordsFileReader(dst_dir=src_dir, batch_size=batch_size,
dtype=tf.dtypes.as_dtype(metadata['dtype']),
multihot=metadata['multihot'],
feature_name=metadata['feature_name'],
num_features=metadata['num_features'],
world_size=world_size, rank=rank, data_parallel=data_parallel)
return reader
else:
raise ValueError(f'Unknown feature format: {metadata["format"]}')
class SingleFeatureTFRecordsFileReader:
def __init__(self, dst_dir, batch_size, dtype, rank=0, world_size=1,
num_features=1, feature_name='data', multihot=False,
data_parallel=True, parallel_calls=4):
self.filenames = glob.glob(os.path.join(dst_dir, 'data_*.tfrecords'))
self.feature_name = feature_name
self.multihot = multihot
self.batch_size = batch_size
self.num_features = num_features
self.dtype = dtype
self.feature_description = {self.feature_name: tf.io.FixedLenFeature([], tf.string, default_value='')}
self.data_parallel = data_parallel
self.parallel_calls = parallel_calls
self.rank = rank
self.world_size = world_size
if self.data_parallel:
local_batch_size = int(self.batch_size / world_size)
batch_sizes_per_gpu = [local_batch_size] * world_size
indices = tuple(np.cumsum([0] + list(batch_sizes_per_gpu)))
self.dp_begin_idx = indices[rank]
self.dp_end_idx = indices[rank + 1]
def __len__(self):
pass
def _data_parallel_split(self, x):
return x[self.dp_begin_idx:self.dp_end_idx, ...]
def _parse_function(self, proto):
parsed = tf.io.parse_single_example(proto, self.feature_description)
if self.multihot:
rt_spec = tf.RaggedTensorSpec(dtype=tf.int32, shape=[self.batch_size, None],
row_splits_dtype=tf.int32, ragged_rank=1)
tensor = parsed[self.feature_name]
tensor = deserialize_composite(serialized=tensor, type_spec=rt_spec)
else:
tensor = tf.io.parse_tensor(parsed[self.feature_name], out_type=self.dtype)
tensor = tf.reshape(tensor, shape=[self.batch_size, self.num_features])
if self.data_parallel:
tensor = self._data_parallel_split(tensor)
return tensor
def op(self):
num_parallel_reads = 8
dataset = tf.data.TFRecordDataset(self.filenames, num_parallel_reads=num_parallel_reads)
dataset = dataset.map(self._parse_function, num_parallel_calls=self.parallel_calls, deterministic=True)
dataset = dataset.prefetch(buffer_size=1)
dataset = dataset.repeat()
return dataset
class SplitTFRecordsDataset:
def __init__(self, dataset_dir, feature_ids, num_numerical, batch_size, world_size, rank):
self.dataset_dir = dataset_dir
self.feature_ids = feature_ids
self.num_numerical = num_numerical
self.batch_size = batch_size
self.world_size = world_size
self.rank = rank
self.numerical_reader = create_reader(src_dir=os.path.join(dataset_dir, 'numerical'),
world_size=world_size, rank=rank, batch_size=batch_size,
data_parallel=True)
self.label_reader = create_reader(src_dir=os.path.join(dataset_dir, 'label'),
world_size=world_size, rank=rank, data_parallel=True,
batch_size=batch_size)
self.categorical_readers = []
for feature_id in feature_ids:
reader = create_reader(src_dir=os.path.join(dataset_dir, f'categorical_{feature_id}'),
batch_size=batch_size, data_parallel=False)
self.categorical_readers.append(reader)
filename = length_filename(self.dataset_dir)
with open(filename) as f:
self.length = json.load(f)
def __len__(self):
return self.length
def op(self):
categorical_tf_datasets = tuple(d.op() for d in self.categorical_readers)
features_datasets = (self.numerical_reader.op(), categorical_tf_datasets)
structure_to_zip = (features_datasets, self.label_reader.op())
dataset = tf.data.Dataset.zip(structure_to_zip)
return dataset
@staticmethod
def generate(src_train, src_test, feature_spec, dst_dir, dst_feature_spec, prebatch_size, max_batches_train, max_batches_test):
local_table_sizes = feature_spec.get_categorical_sizes()
names = feature_spec.get_categorical_feature_names()
local_table_hotness = [feature_spec.feature_spec[name].get('hotness', 1) for name in names]
os.makedirs(dst_dir, exist_ok=True)
num_files = 1
feature_spec.to_yaml(output_path=os.path.join(dst_dir, dst_feature_spec))
sources = [(src_train, 'train', max_batches_train), (src_test, 'test', max_batches_test)]
for src, dst_suffix, max_batches in sources:
num_batches = min(len(src), max_batches)
if num_batches % num_files != 0:
                raise ValueError('The length of the dataset must be evenly divisible by the number of TFRecords files')
dst_subdir = os.path.join(dst_dir, dst_suffix)
numerical_writer = create_writer(dst_dir=os.path.join(dst_subdir, 'numerical'), dtype=tf.float16,
num_features=feature_spec.get_number_of_numerical_features(),
batches_per_file=num_batches // num_files)
label_writer = create_writer(dst_dir=os.path.join(dst_subdir, 'label'), dtype=tf.int8,
batches_per_file=num_batches // num_files)
categorical_writers = []
for i, (hotness, cardinality) in enumerate(zip(local_table_hotness, local_table_sizes)):
# TODO: possibly optimize the dtype by using cardinality here
writer = create_writer(dst_dir=os.path.join(dst_subdir, f'categorical_{i}'), dtype=tf.int32,
multihot=hotness > 1,
batches_per_file=num_batches // num_files)
categorical_writers.append(writer)
with open(length_filename(dst_subdir), 'w') as f:
json.dump(num_batches, f)
for batch_idx, batch in tqdm.tqdm(enumerate(src.op()),
total=max_batches,
desc=f'Generating the {dst_suffix} data'):
print('writing batch: ', batch_idx)
if batch_idx == max_batches:
break
(numerical, categorical), label = batch
if label.shape[0] != prebatch_size:
raise ValueError(f'Source dataset batch size ({label.shape[0]}) '
f'different from the prebatch size ({prebatch_size}). Unsupported.')
numerical_writer.save(numerical)
label_writer.save(label)
for writer, feature in zip(categorical_writers, categorical):
writer.save(feature)
numerical_writer.close()
label_writer.close()
for writer in categorical_writers:
writer.close()
|
PyTorch/SpeechRecognition/wav2vec2/utils | utils | convert_librispeech | #!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import glob
import multiprocessing
import json
import pandas as pd
from preprocessing_utils import parallel_preprocess
parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
parser.add_argument('--input_dir', type=str, required=True,
help='LibriSpeech collection input dir')
parser.add_argument('--dest_dir', type=str, required=True,
help='Output dir')
parser.add_argument('--output_json', type=str, default='./',
help='name of the output json file.')
parser.add_argument('-s', '--speed', type=float, nargs='*',
help='Speed perturbation ratio')
parser.add_argument('--target_sr', type=int, default=None,
help='Target sample rate. '
                         'Defaults to the input sample rate')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite file if exists')
parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
help='Number of threads to use when processing audio files')
args = parser.parse_args()
args.input_dir = args.input_dir.rstrip('/')
args.dest_dir = args.dest_dir.rstrip('/')
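# Example invocation (sketch; paths are illustrative):
#   python utils/convert_librispeech.py \
#       --input_dir /datasets/LibriSpeech/train-clean-100 \
#       --dest_dir /datasets/LibriSpeech/wav/train-clean-100 \
#       --output_json /datasets/LibriSpeech/librispeech-train-clean-100-wav.json \
#       --speed 0.9 1.1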
def build_input_arr(input_dir):
txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
recursive=True)
input_data = []
for txt_file in txt_files:
rel_path = os.path.relpath(txt_file, input_dir)
with open(txt_file) as fp:
for line in fp:
fname, _, transcript = line.partition(' ')
input_data.append(dict(input_relpath=os.path.dirname(rel_path),
input_fname=fname+'.flac',
transcript=transcript))
return input_data
print("[%s] Scaning input dir..." % args.output_json)
dataset = build_input_arr(input_dir=args.input_dir)
print("[%s] Converting audio files..." % args.output_json)
dataset = parallel_preprocess(dataset=dataset,
input_dir=args.input_dir,
dest_dir=args.dest_dir,
target_sr=args.target_sr,
speed=args.speed,
overwrite=args.overwrite,
parallel=args.parallel)
print("[%s] Generating json..." % args.output_json)
df = pd.DataFrame(dataset, dtype=object)
# Save json with python. df.to_json() produces backslashes in file paths
dataset = df.to_dict(orient='records')
with open(args.output_json, 'w') as fp:
json.dump(dataset, fp, indent=2)
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner | triton_inference_runner | runner | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from ..utils import TritonClientProtocol, parse_server_url
from .grpc import AsyncInferenceRunner as AsyncGRPCRunner
from .grpc import SyncInferenceRunner as SyncGRPCRunner
from .http import AsyncInferenceRunner as AsyncHTTPRunner
from .http import SyncInferenceRunner as SyncHTTPRunner
class TritonInferenceRunner:
async_runners = {
TritonClientProtocol.GRPC: AsyncGRPCRunner,
        TritonClientProtocol.HTTP: AsyncHTTPRunner,
}
sync_runners = {
TritonClientProtocol.GRPC: SyncGRPCRunner,
TritonClientProtocol.HTTP: SyncHTTPRunner,
}
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
dataloader_fn,
verbose: bool = False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: int = 128,
synchronous: bool = False,
):
protocol, host, port = parse_server_url(server_url)
server_url = f"{host}:{port}"
if synchronous:
sync_runner_cls = TritonInferenceRunner.sync_runners[protocol]
self._runner = sync_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
)
else:
async_runner_cls = TritonInferenceRunner.async_runners[protocol]
self._runner = async_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
max_unresponded_requests=max_unresponded_requests,
)
def __iter__(self):
return self._runner.__iter__()
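# Usage sketch (illustrative; the yielded structure matches what the underlying gRPC/HTTP runner
# and the supplied dataloader produce):
#   runner = TritonInferenceRunner(
#       server_url="grpc://localhost:8001",
#       model_name="gpunet",
#       model_version="1",
#       dataloader_fn=get_dataloader_fn,  # hypothetical callable returning a dataloader
#       synchronous=True,
#   )
#   for outputs in runner:
#       ...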
|
PyTorch/LanguageModeling/BERT/triton/runner | runner | triton | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Framework, Paths
class Triton:
"""
Triton Inference Server helper class
"""
image = "nvcr.io/nvidia/tritonserver"
tag = "py3"
class LOAD_MODE:
"""
Loading mode available in Triton
"""
POLL = "poll"
EXPLICIT = "explicit"
@staticmethod
def container_image(container_version: str):
"""
Container image based on version
Args:
container_version: Version of container to be used
Returns:
Image name with tag
"""
return f"{Triton.image}:{container_version}-{Triton.tag}"
@staticmethod
def command(
framework: str,
repository_path: str,
strict_mode: bool = False,
poll_model: bool = False,
metrics: bool = False,
verbose: bool = False,
):
"""
Command to run Triton Inference Server inside container
Args:
framework: Framework used for model
repository_path: Path to model repository
strict_mode: Flag to use strict model config
poll_model: Poll model
metrics: Enable GPU metrics (disable for MIG)
verbose: Use verbose mode logging
        Returns:
            Command string used to start Triton Inference Server
        """
triton_command = f"tritonserver --model-store={repository_path}"
if poll_model:
triton_command += " --model-control-mode=poll --repository-poll-secs 5"
else:
triton_command += " --model-control-mode=explicit"
if not strict_mode:
triton_command += " --strict-model-config=false"
if not metrics:
triton_command += " --allow-metrics=false --allow-gpu-metrics=false"
if verbose:
triton_command += " --log-verbose 1"
if framework in (Framework.TensorFlow1, Framework.TensorFlow2):
version = 1 if framework == Framework.TensorFlow1 else 2
triton_command += f" --backend-config=tensorflow,version={version}"
return triton_command
@staticmethod
def library_path(framework: str):
"""
Obtain custom library path for framework
Args:
framework: Framework used for model
Returns:
Path to additional libraries needed by framework
"""
paths = {
Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch",
Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1",
Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2",
}
return paths[framework]
@staticmethod
def custom_library_path_remote() -> str:
"""
Path to custom library mounted in Triton container
Returns:
Path to shared library with custom operations
"""
return f"{Paths.LIBRARIES_PATH}/libcustomops.so"
@staticmethod
def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path:
"""
Path to custom library in local path
Args:
libs_dir: path to libraries directory
Returns:
Path to shared library with custom operations
"""
return libs_dir / "libcustomops.so"
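# Usage sketch (values are illustrative):
#   image = Triton.container_image("21.10")
#   # -> "nvcr.io/nvidia/tritonserver:21.10-py3"
#   cmd = Triton.command(framework=Framework.PyTorch, repository_path="/mnt/model-store", verbose=True)
#   # -> "tritonserver --model-store=/mnt/model-store --model-control-mode=explicit
#   #     --strict-model-config=false --allow-metrics=false --allow-gpu-metrics=false --log-verbose 1"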
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda | bermuda | tensorrt | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except (ImportError, Exception) as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
documentation:
https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
"""
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = engine.get_binding_dtype(binding_idx)
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
class TensorRTSaver(BaseSaver):
def __init__(self):
pass
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
engine: "trt.ICudaEngine" = model.handle
with model_path.open("wb") as fh:
fh.write(engine.serialize())
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
# TODO: are cuda buffers dealloc automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
# TODO: are CUDA buffers dealloc automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype))
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
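# Usage sketch (illustrative; the input/output names, shapes and dtypes come from the engine itself,
# so "fastpitch.plan" and "INPUT__0" are placeholders):
#   model = TensorRTLoader().load("fastpitch.plan")
#   with TensorRTRunner().init_inference(model) as runner:
#       y_pred = runner({"INPUT__0": np.zeros((1, 128), dtype=np.int64)})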
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
savers.register_extension(Format.TRT.value, TensorRTSaver)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_resnet_v1_fpn_feature_extractor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1.
See https://arxiv.org/abs/1708.02002 for details.
"""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
slim = tf.contrib.slim
class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD FPN feature extractor based on Resnet v1 architecture."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
fpn_scope_name,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
UNUSED currently.
min_depth: minimum feature extractor depth. UNUSED Currently.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
resnet_base_fn: base resnet network to use.
resnet_scope_name: scope name under which to construct resnet
fpn_scope_name: scope name under which to construct the feature pyramid
network.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
FPN. FPN constructions uses features maps starting from fpn_min_level
upto the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: On supplying invalid arguments for unused arguments.
"""
super(_SSDResnetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if self._depth_multiplier != 1.0:
raise ValueError('Only depth 1.0 is supported, found: {}'.
format(self._depth_multiplier))
if self._use_explicit_padding is True:
raise ValueError('Explicit padding is not a valid option.')
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._fpn_scope_name = fpn_scope_name
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md.
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _filter_features(self, image_features):
# TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
# of munging the scope here.
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block1', 'block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: depth multiplier is not supported.
"""
if self._depth_multiplier != 1.0:
raise ValueError('Depth multiplier not supported.')
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
scope=scope)
image_features = self._filter_features(image_features)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope(self._fpn_scope_name,
reuse=self._reuse_weights):
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=self._additional_layer_depth)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(
fpn_features['top_down_block{}'.format(level - 1)])
last_feature_map = fpn_features['top_down_block{}'.format(
base_fpn_max_level - 1)]
# Construct coarse features
for i in range(base_fpn_max_level, self._fpn_max_level):
last_feature_map = slim.conv2d(
last_feature_map,
num_outputs=self._additional_layer_depth,
kernel_size=[3, 3],
stride=2,
padding='SAME',
scope='bottom_up_block{}'.format(i))
feature_maps.append(last_feature_map)
return feature_maps
class SSDResnet50V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet50 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
UNUSED currently.
min_depth: minimum feature extractor depth. UNUSED Currently.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_50,
'resnet_v1_50',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
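# Construction sketch (illustrative; conv_hyperparams_fn normally comes from the hyperparams builder,
# so my_conv_hyperparams_fn is a placeholder):
#   extractor = SSDResnet50V1FpnFeatureExtractor(
#       is_training=True, depth_multiplier=1.0, min_depth=16, pad_to_multiple=32,
#       conv_hyperparams_fn=my_conv_hyperparams_fn)
#   preprocessed = extractor.preprocess(images)              # images: [batch, H, W, 3], H/W >= 129
#   feature_maps = extractor.extract_features(preprocessed)  # list of FPN feature maps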
class SSDResnet101V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet101 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
UNUSED currently.
min_depth: minimum feature extractor depth. UNUSED Currently.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_101,
'resnet_v1_101',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class SSDResnet152V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet152 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
UNUSED currently.
min_depth: minimum feature extractor depth. UNUSED Currently.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet152V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_152,
'resnet_v1_152',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/inference | inference | native | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
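# Hydra/OmegaConf-style config: `_target_` names the callable to run, and `???` marks a
# mandatory value that must be supplied at launch time (here, the path to a trained checkpoint).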
_target_: inference.inference.run_inference
config:
checkpoint: ???
batch_size: 64
precision: fp32
device: cuda |
PyTorch/Recommendation/DLRM/dlrm/cuda_src | cuda_src | gather_gpu_fused | #include <iostream>
#include <cuda_runtime_api.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/cuda/CUDAContext.h>
#define CHK_CUDA(expression) \
{ \
cudaError_t status = (expression); \
if (status != cudaSuccess) { \
std::cerr << "Error in file: " << __FILE__ << ", on line: " << __LINE__ << ": " << cudaGetErrorString(status) \
<< std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Only 4-element vectorized types are implemented; the same pattern can be extended to other widths.
// Loads/stores go through the "mask" members, element-wise assignments through the "val" members.
template <class DTYPE>
struct VecType4{};
template <>
struct VecType4<__half> {
typedef float2 Type;
typedef struct __align__(8) {
__half x;
__half y;
__half z;
__half w;
} half4;
union Data {
half4 val;
Type mask;
} data;
__device__ VecType4() {
data.mask = make_float2(0.0f, 0.0f);
}
__device__ VecType4& operator=(float4 &in) {
data.val.x = __float2half(in.x);
data.val.y = __float2half(in.y);
data.val.z = __float2half(in.z);
data.val.w = __float2half(in.w);
return *this;
}
__device__ VecType4& operator=(half4 &in) {
data.val = in;
return *this;
}
};
template <>
struct VecType4<float> {
typedef float4 Type;
union Data {
Type val;
Type mask;
} data;
__device__ VecType4() {
data.val.x = 0.0f;
data.val.y = 0.0f;
data.val.z = 0.0f;
data.val.w = 0.0f;
}
__device__ VecType4& operator=(VecType4<__half>::half4 &in) {
data.val.x = __half2float(in.x);
data.val.y = __half2float(in.y);
data.val.z = __half2float(in.z);
data.val.w = __half2float(in.w);
return *this;
}
__device__ VecType4& operator=(float4 &in) {
data.val = in;
return *this;
}
};
// PyTorch extensions are built with -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__
// -D__CUDA_NO_HALF2_OPERATORS__ by default, hence the explicit __half <-> float conversion helpers below.
template <typename ITYPE, typename OTYPE, typename std::enable_if<(std::is_same<ITYPE, float>::value &&
std::is_same<OTYPE, __half>::value),
ITYPE>::type * = nullptr>
__device__ __host__ __forceinline__ OTYPE fp_type_cast(ITYPE input) {
return __float2half(input);
}
template <typename ITYPE, typename OTYPE, typename std::enable_if<(std::is_same<ITYPE, __half>::value &&
std::is_same<OTYPE, float>::value),
ITYPE>::type * = nullptr>
__device__ __host__ __forceinline__ OTYPE fp_type_cast(ITYPE input) {
return __half2float(input);
}
template <typename ITYPE, typename OTYPE, typename std::enable_if<std::is_same<ITYPE, OTYPE>::value,
ITYPE>::type * = nullptr>
__device__ __host__ __forceinline__ OTYPE fp_type_cast(ITYPE input) {
return input;
}
// This kernel assumes an embedding vector width of 128.
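// Strategy: each warp processes one sample (all 26 categorical features) per loop iteration.
// Lanes 0..25 each fetch the lookup index of one feature (plus that table's base offset);
// __shfl_sync then broadcasts every index across the warp so all 32 lanes can cooperatively
// copy the corresponding 128-element embedding row using 128-bit vectorized loads (4 elements per lane).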
template <typename ITYPE, typename OTYPE>
__global__ void lookupEmbeddings(ITYPE *embeddingTable, int64_t *offsets,
int64_t *indices, OTYPE *outLookup, int batch_size) {
typedef typename VecType4<ITYPE>::Type invec4;
typedef typename VecType4<OTYPE>::Type outvec4;
int vector_width = 128;
const int fea_count = 26;
int lane_id = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
int num_warps = blockDim.x / warpSize;
int start_idx = warp_id * fea_count + lane_id + blockIdx.x * (num_warps * fea_count);
int64_t lane_offset = 0;
if (lane_id < fea_count)
lane_offset = offsets[lane_id];
while (1) {
int64_t lookup_idx = -1;
if (lane_id < fea_count && start_idx < (batch_size * fea_count)) {
lookup_idx = indices[start_idx] + lane_offset;
}
if (__all_sync(0xffffffff, lookup_idx == -1))
break;
for (int i = 0; i < fea_count; i++) {
int64_t table_idx = __shfl_sync(0xffffffff, lookup_idx, i);
if (table_idx != -1) {
invec4 *vec_embedding_table = reinterpret_cast<invec4*>(embeddingTable);
outvec4 *vec_embedding_out = reinterpret_cast<outvec4*>(outLookup);
int64_t out_idx = start_idx - lane_id + i;
out_idx *= vector_width;
int vector_inst_width = 4; // 128 bit loads, 4-floats
int64_t vec_in_idx = ((table_idx * vector_width) + (lane_id * vector_inst_width)) >> 2;
int64_t vec_out_idx = (out_idx + (lane_id * vector_inst_width)) >> 2;
VecType4<ITYPE> input_elements;
input_elements.data.mask = vec_embedding_table[vec_in_idx];
VecType4<OTYPE> output_elements;
output_elements = input_elements.data.val;
vec_embedding_out[vec_out_idx] = output_elements.data.mask;
}
}
start_idx += (gridDim.x * num_warps * fea_count);
}
}
__global__ void indices_offset_addition(int64_t *indices, int64_t *offsets, int64_t *output_indices,
int batch_size) {
const int fea_count = 26;
__shared__ int64_t smem_offsets[fea_count];
if (threadIdx.x < fea_count) {
smem_offsets[threadIdx.x] = offsets[threadIdx.x];
}
__syncthreads();
int start_idx = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = start_idx; i < (batch_size * fea_count); i+=(gridDim.x * blockDim.x)) {
output_indices[i] = indices[i] + smem_offsets[i % fea_count];
}
}
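// Element-wise cast/copy of the incoming gradient tensor into the output dtype.
// The bulk is moved with 4-element vectorized loads/stores; any remainder of 1-3
// elements is copied scalarly.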
template <typename ITYPE, typename OTYPE>
__global__ void gradient_copy_kernel(ITYPE *input_gradient, OTYPE *output_gradient, int64_t num_elements) {
typedef typename VecType4<ITYPE>::Type invec4;
typedef typename VecType4<OTYPE>::Type outvec4;
invec4 *vec_input_gradient = reinterpret_cast<invec4*>(input_gradient);
outvec4 *vec_output_gradient = reinterpret_cast<outvec4*>(output_gradient);
int64_t start_idx = threadIdx.x + blockIdx.x * blockDim.x;
for (int64_t i = start_idx; i < num_elements / 4; i+= (gridDim.x * blockDim.x)) {
VecType4<ITYPE> input_elements;
input_elements.data.mask = vec_input_gradient[i];
VecType4<OTYPE> output_elements;
output_elements = input_elements.data.val;
vec_output_gradient[i] = output_elements.data.mask;
}
int elements_left = num_elements % 4;
if (threadIdx.x == 0 && elements_left != 0) {
while(elements_left) {
int64_t idx = num_elements - elements_left;
output_gradient[idx] = fp_type_cast<ITYPE, OTYPE>(input_gradient[idx]);
elements_left--;
}
}
}
// The kernels can be instantiated for any float<->float, float<->half, or half<->half combination,
// but the runner functions below are only specialized for the combinations actually used.
template <typename ITYPE, typename OTYPE>
void gather_gpu_fused_fwd(ITYPE *embeddingTablePtr, int64_t *indices_offset, int64_t *lookup_indices,
OTYPE *outputPtr, int batch_size) {};
template <>
void gather_gpu_fused_fwd(float *embeddingTablePtr, int64_t *indices_offset, int64_t *lookup_indices,
c10::Half *outputPtr, int batch_size) {
auto deviceProp = at::cuda::getCurrentDeviceProperties();
dim3 block(deviceProp->maxThreadsPerBlock, 1, 1);
dim3 grid((deviceProp->multiProcessorCount * deviceProp->maxThreadsPerMultiProcessor) / deviceProp->maxThreadsPerBlock,
1, 1);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
lookupEmbeddings<float, __half><<<grid, block, 0, stream>>>(embeddingTablePtr, indices_offset, lookup_indices, (__half*)outputPtr, batch_size);
CHK_CUDA(cudaGetLastError());
}
template <>
void gather_gpu_fused_fwd(float *embeddingTablePtr, int64_t *indices_offset, int64_t *lookup_indices,
float *outputPtr, int batch_size) {
auto deviceProp = at::cuda::getCurrentDeviceProperties();
dim3 block(deviceProp->maxThreadsPerBlock, 1, 1);
dim3 grid((deviceProp->multiProcessorCount * deviceProp->maxThreadsPerMultiProcessor) / deviceProp->maxThreadsPerBlock,
1, 1);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
lookupEmbeddings<float, float><<<grid, block, 0, stream>>>(embeddingTablePtr, indices_offset, lookup_indices, outputPtr, batch_size);
CHK_CUDA(cudaGetLastError());
}
template <>
void gather_gpu_fused_fwd(c10::Half *embeddingTablePtr, int64_t *indices_offset, int64_t *lookup_indices,
c10::Half *outputPtr, int batch_size) {
auto deviceProp = at::cuda::getCurrentDeviceProperties();
dim3 block(deviceProp->maxThreadsPerBlock, 1, 1);
dim3 grid((deviceProp->multiProcessorCount * deviceProp->maxThreadsPerMultiProcessor) / deviceProp->maxThreadsPerBlock,
1, 1);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
lookupEmbeddings<__half, __half><<<grid, block, 0, stream>>>((__half*)embeddingTablePtr, indices_offset, lookup_indices, (__half*)outputPtr, batch_size);
CHK_CUDA(cudaGetLastError());
}
template <typename ITYPE, typename OTYPE>
void gather_gpu_fused_bwd(ITYPE *input_gradient, int64_t *lookup_indices, int64_t *offsets, OTYPE *out_gradient,
int64_t *out_indices, int batch_size, int num_features, int embed_vector_dim) {};
template <>
void gather_gpu_fused_bwd(c10::Half *input_gradient, int64_t *lookup_indices, int64_t *offsets, float *out_gradient,
int64_t *out_indices, int batch_size, int num_features, int embed_vector_dim) {
// offset addition to indices
auto deviceProp = at::cuda::getCurrentDeviceProperties();
dim3 block(deviceProp->maxThreadsPerBlock, 1, 1);
dim3 grid((deviceProp->multiProcessorCount * deviceProp->maxThreadsPerMultiProcessor) / deviceProp->maxThreadsPerBlock,
1, 1);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
// indices - offset addition kernel
indices_offset_addition<<<grid, block, 0, stream>>>(lookup_indices, offsets, out_indices, batch_size);
CHK_CUDA(cudaGetLastError());
gradient_copy_kernel<__half, float><<<grid, block, 0, stream>>>((__half *)input_gradient, out_gradient, (int64_t)batch_size * num_features * embed_vector_dim );
CHK_CUDA(cudaGetLastError());
}
template <>
void gather_gpu_fused_bwd(float *input_gradient, int64_t *lookup_indices, int64_t *offsets, float *out_gradient,
int64_t *out_indices, int batch_size, int num_features, int embed_vector_dim) {
// offset addition to indices
auto deviceProp = at::cuda::getCurrentDeviceProperties();
dim3 block(deviceProp->maxThreadsPerBlock, 1, 1);
dim3 grid((deviceProp->multiProcessorCount * deviceProp->maxThreadsPerMultiProcessor) / deviceProp->maxThreadsPerBlock,
1, 1);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
// indices - offset addition kernel
indices_offset_addition<<<grid, block, 0, stream>>>(lookup_indices, offsets, out_indices, batch_size);
CHK_CUDA(cudaGetLastError());
gradient_copy_kernel<float, float><<<grid, block, 0, stream>>>(input_gradient, out_gradient, (int64_t)batch_size * num_features * embed_vector_dim );
CHK_CUDA(cudaGetLastError());
}
|
TensorFlow/Translation/GNMT/qa | qa | L1_joc_GNMT_inferbench_fp16 | set -o nounset
set -o errexit
set -o pipefail
cd ..
cp -r /data/joc/gnmt_tf/19.08 output_dir
# hack to work with pytorch dataset
sed -ie 's/ src_vocab_file = hparams.vocab_prefix + "." + hparams.src/ src_vocab_file = hparams.vocab_prefix/g' nmt.py
sed -ie 's/ tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt/ tgt_vocab_file = hparams.vocab_prefix/g' nmt.py
( python nmt.py --amp --data_dir=/data/pytorch/wmt16_de_en --output_dir=output_dir --mode=infer --infer_batch_size=512 2>&1 ) | tee log.log
python scripts/parse_log.py log.log | tee log.json
python << END
import json
import numpy as np
from pathlib import Path
baseline = 10254
bleu_baseline = 25.1
log = json.loads(Path('log.json').read_text())
speed = np.mean(log['eval_tokens_per_sec'])
bleu = log['bleu'][0]
print('Eval speed :', speed)
print('Baseline :', baseline)
print('Bleu :', bleu)
print('Bleu baseline :', bleu_baseline)
if speed < baseline * 0.9:
print("FAILED: speed ({}) doesn't match the baseline ({})".format(speed, baseline))
exit(1)
if bleu < bleu_baseline - 0.2:
print("FAILED: bleu ({}) doesn't match the baseline ({})".format(bleu, bleu_baseline))
exit(1)
print('SUCCESS')
END
|
PyTorch/Detection | Detection | README | # Object Detection
A natural progression from image classification is classifying and localizing the subject of an image. Taking the idea one step further, we can localize every object in a given image. Simply put, object detection refers to identifying which objects are present in an image and where they are.

Source: [Joseph Redmon, Ali Farhadi, “YOLO9000:Better, Faster, Stronger”](https://arxiv.org/abs/1612.08242)
## Introduction to Object Detection
In this section we will try to answer the following questions:
- What is object detection?
- Why is object detection important?
Object detection is not only about detecting the presence and location of objects in images and videos, but also about categorizing them into everyday classes. Image classification and object detection are often confused. Simply put, the difference between them is the same as the difference between saying “This is a cat” and pointing to a cat and saying “There is the cat”.
Perception is the main challenge to be solved when building autonomous systems. In this context, perception refers to an autonomous agent's ability to understand its surroundings: the agent needs to figure out what objects are in its immediate vicinity and where they are.
Object detection can also help keep humans away from toxic environments and hazardous situations. High-risk applications such as garbage segregation, oil rig monitoring, nightly surveillance, and cargo port maintenance can be aided by robots and cameras that detect objects. Essentially, in any environment that requires visual inspection or analysis but is too dangerous for humans, an object detection pipeline can shield people from onsite hazards.
## How does it work?
While object detection has been a topic of research since before Deep Learning became mainstream, the best-performing models today use one or more Deep Neural Networks.
Many architectures build on networks pretrained on a different, simpler task, such as Image Classification. The inputs are images or videos, and the outputs are a set of bounding box coordinates that enclose each detected object, together with a class label for each one. With advances in research and the use of GPUs, real-time object detection with impressive accuracy is now possible.

Source: [Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, Alexander C. Berg, “SSD: Single Shot MultiBox Detector”](https://arxiv.org/abs/1512.02325)
The Single Shot Detector (SSD) is one of the state-of-the-art models for object detection and localization. It is based on a feed-forward convolutional neural network that always yields a fixed set of bounding boxes, each with a confidence score representing how confident the network is that the box contains an object. This is followed by a non-maximum suppression step that outputs the final detections.
The network can be understood as two networks stacked on top of each other. The first is a plain convolutional neural network that extracts important features, just like an image classification backbone.
The second is a multiscale feature map network built from additional convolutional layers that are progressively smaller, allowing detections at multiple scales; simply put, the progressively smaller layers help detect objects of different sizes. Each of these layers outputs a number of detections, and the final output is passed to non-maximum suppression, which yields the final set of detections.
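To make the non-maximum suppression step concrete, the following is a minimal NumPy sketch of greedy NMS. It is purely illustrative; the models in this collection ship with their own optimized implementations.

```python
import numpy as np

def greedy_nms(boxes, scores, iou_threshold=0.5):
    """Greedy NMS. boxes: (N, 4) array of [x1, y1, x2, y2]; scores: (N,) confidences."""
    order = scores.argsort()[::-1]  # visit boxes from highest to lowest confidence
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        rest = order[1:]
        # Intersection of the kept box with every remaining candidate
        x1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        y1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        x2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        y2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_r = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + area_r - inter)
        # Suppress candidates that overlap the kept box too strongly
        order = rest[iou <= iou_threshold]
    return keep

boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
print(greedy_nms(boxes, scores))  # [0, 2]: the second box is suppressed by the first
```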
This Collection contains models and containers for object detection achieving state-of-the-art accuracies, tested and maintained by NVIDIA.
## Applications and Use cases
### Autonomous Vehicles
Autonomous vehicles need to perceive and interact with real-world objects in order to blend into the environment. For instance, a self-driving car needs to detect other vehicles, pedestrians, objects on the road, traffic signals, and any obstacles in its path, and it must also understand the exact location of these objects. This perception information helps the agent avoid obstacles and interact correctly with objects such as traffic lights.
### Warehouses
Warehouses have many conveyor belts and segregation platforms, tasks that have traditionally been handled manually. As factories and warehouses grow, manual sorting and inventory management cannot scale proportionally. Object detection pipelines deployed on robots can reduce operational friction and give businesses an easy way to scale up.
### Surveillance
Surveillance systems typically accumulate large volumes of video data that need to be analyzed for all sorts of anomalies. Given the number of video sources even a small store has, analyzing surveillance data from a large operation is a challenge. Object detection networks can automate much of this pipeline by highlighting sections that contain an object of interest, and they can also be trained to identify anomalies in video streams.
### Hazardous tasks
Humans work at waste processing plants, nuclear power plants, oil rigs, and around heavy machinery, all of which are hazardous environments that pose health risks. These jobs require human presence mainly for visual inspection and confirmation, tasks that revolve around recognizing objects and relaying their locations. Risky tasks like these can be handled with the help of an object detection pipeline deployed on a camera or a robot, reducing operational risks and costs. |
PyTorch/Forecasting/TFT/triton/runner | runner | config | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, List, Optional, Union
import yaml
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .configuration import Configuration
from .core import DataObject
from .triton import Triton
class Checkpoint(DataObject):
"""
Checkpoint data placeholder
"""
name: str
url: str
def __init__(self, name: str, url: str):
self.name = name
self.url = url
class Dataset(DataObject):
"""
Dataset data placeholder
"""
name: str
def __init__(self, name: str):
self.name = name
class Config(DataObject):
"""
Configuration object for runner experiments
"""
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
configurations: List[Configuration],
datasets_dir: str = "datasets",
datasets: List[Dataset] = None,
checkpoints: List[Checkpoint] = None,
triton_dockerfile: Optional[str] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Framework used to create model
container_version: Version of Triton Inference Server container used for evaluation
configurations: List of experiments configurations
datasets_dir: Directory where datasets are stored
datasets: Datasets used for conversion/export
checkpoints: Checkpoints with trained model
triton_load_model_method: Triton Inference Server model loading mode
triton_dockerfile: Dockerfile for Triton to build custom image
triton_container_image: Custom image used for Triton Server - leave empty to use default or built from Dockerfile
triton_custom_operations: Path where custom operation library is stored
"""
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.configurations = configurations
self.datasets_dir = datasets_dir
self.datasets = datasets
self.checkpoints = checkpoints
self.triton_load_model_method = triton_load_model_method
self.triton_dockerfile = triton_dockerfile
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
def to_file(self, file_path: Union[pathlib.Path, str]) -> None:
"""
Save config data to file
Args:
            file_path: path to file where config data should be stored
Returns:
None
"""
data = self.to_dict()
with open(file_path, "w") as f:
yaml.safe_dump(data, f)
@staticmethod
def from_dict(config_data: Dict):
"""
Create configuration object from data stored in dictionary
Args:
config_data: dictionary with config data
Returns:
Config object
"""
configurations = []
for configuration_data in config_data["configurations"]:
configuration = Configuration(**configuration_data)
configurations.append(configuration)
checkpoints = []
for checkpoint_data in config_data.get("checkpoints", []):
checkpoint = Checkpoint(
name=checkpoint_data["name"],
url=checkpoint_data["url"],
)
checkpoints.append(checkpoint)
datasets = []
for dataset_data in config_data.get("datasets", []):
dataset = Dataset(name=dataset_data["name"])
datasets.append(dataset)
return Config(
model_name=config_data["model_name"],
framework=config_data["framework"],
container_version=config_data["container_version"],
configurations=configurations,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config_data.get("datasets_dir"),
triton_load_model_method=config_data["triton_load_model_method"],
triton_dockerfile=config_data.get("triton_dockerfile"),
triton_container_image=config_data.get("triton_container_image"),
triton_custom_operations=config_data.get("triton_custom_operations"),
)
@staticmethod
def from_file(file_path: Union[pathlib.Path, str]):
"""
        Load config data from file
        Args:
            file_path: path to file where config data is stored
        Returns:
            Config object
"""
with open(file_path, "r") as f:
config_data = yaml.safe_load(f)
return Config.from_dict(config_data)
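# Hypothetical usage sketch (not part of the original module): the dictionary below mirrors the
# YAML layout that Config.from_file() parses; all names and values are illustrative placeholders.
if __name__ == "__main__":
    example = Config.from_dict(
        {
            "model_name": "TFT",
            "framework": "PyTorch",
            "container_version": "21.12",
            "triton_load_model_method": "explicit",
            "configurations": [],
            "checkpoints": [{"name": "tft-base", "url": "https://example.com/checkpoint.zip"}],
            "datasets": [{"name": "electricity"}],
        }
    )
    print(example.model_name, [dataset.name for dataset in example.datasets])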
|
PyTorch/SpeechSynthesis/Tacotron2/filelists | filelists | ljs_mel_text_val_filelist | LJSpeech-1.1/mels/LJ022-0023.pt|The overwhelming majority of people in this country know how to sift the wheat from the chaff in what they hear and what they read.
LJSpeech-1.1/mels/LJ043-0030.pt|If somebody did that to me, a lousy trick like that, to take my wife away, and all the furniture, I would be mad as hell, too.
LJSpeech-1.1/mels/LJ005-0201.pt|as is shown by the report of the Commissioners to inquire into the state of the municipal corporations in eighteen thirty-five.
LJSpeech-1.1/mels/LJ001-0110.pt|Even the Caslon type when enlarged shows great shortcomings in this respect:
LJSpeech-1.1/mels/LJ003-0345.pt|All the committee could do in this respect was to throw the responsibility on others.
LJSpeech-1.1/mels/LJ007-0154.pt|These pungent and well-grounded strictures applied with still greater force to the unconvicted prisoner, the man who came to the prison innocent, and still uncontaminated,
LJSpeech-1.1/mels/LJ018-0098.pt|and recognized as one of the frequenters of the bogus law-stationers. His arrest led to that of others.
LJSpeech-1.1/mels/LJ047-0044.pt|Oswald was, however, willing to discuss his contacts with Soviet authorities. He denied having any involvement with Soviet intelligence agencies
LJSpeech-1.1/mels/LJ031-0038.pt|The first physician to see the President at Parkland Hospital was Dr. Charles J. Carrico, a resident in general surgery.
LJSpeech-1.1/mels/LJ048-0194.pt|during the morning of November twenty-two prior to the motorcade.
LJSpeech-1.1/mels/LJ049-0026.pt|On occasion the Secret Service has been permitted to have an agent riding in the passenger compartment with the President.
LJSpeech-1.1/mels/LJ004-0152.pt|although at Mr. Buxton's visit a new jail was in process of erection, the first step towards reform since Howard's visitation in seventeen seventy-four.
LJSpeech-1.1/mels/LJ008-0278.pt|or theirs might be one of many, and it might be considered necessary to "make an example."
LJSpeech-1.1/mels/LJ043-0002.pt|The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy. Chapter seven. Lee Harvey Oswald:
LJSpeech-1.1/mels/LJ009-0114.pt|Mr. Wakefield winds up his graphic but somewhat sensational account by describing another religious service, which may appropriately be inserted here.
LJSpeech-1.1/mels/LJ028-0506.pt|A modern artist would have difficulty in doing such accurate work.
LJSpeech-1.1/mels/LJ050-0168.pt|with the particular purposes of the agency involved. The Commission recognizes that this is a controversial area
LJSpeech-1.1/mels/LJ039-0223.pt|Oswald's Marine training in marksmanship, his other rifle experience and his established familiarity with this particular weapon
LJSpeech-1.1/mels/LJ029-0032.pt|According to O'Donnell, quote, we had a motorcade wherever we went, end quote.
LJSpeech-1.1/mels/LJ031-0070.pt|Dr. Clark, who most closely observed the head wound,
LJSpeech-1.1/mels/LJ034-0198.pt|Euins, who was on the southwest corner of Elm and Houston Streets testified that he could not describe the man he saw in the window.
LJSpeech-1.1/mels/LJ026-0068.pt|Energy enters the plant, to a small extent,
LJSpeech-1.1/mels/LJ039-0075.pt|once you know that you must put the crosshairs on the target and that is all that is necessary.
LJSpeech-1.1/mels/LJ004-0096.pt|the fatal consequences whereof might be prevented if the justices of the peace were duly authorized
LJSpeech-1.1/mels/LJ005-0014.pt|Speaking on a debate on prison matters, he declared that
LJSpeech-1.1/mels/LJ012-0161.pt|he was reported to have fallen away to a shadow.
LJSpeech-1.1/mels/LJ018-0239.pt|His disappearance gave color and substance to evil reports already in circulation that the will and conveyance above referred to
LJSpeech-1.1/mels/LJ019-0257.pt|Here the tread-wheel was in use, there cellular cranks, or hard-labor machines.
LJSpeech-1.1/mels/LJ028-0008.pt|you tap gently with your heel upon the shoulder of the dromedary to urge her on.
LJSpeech-1.1/mels/LJ024-0083.pt|This plan of mine is no attack on the Court;
LJSpeech-1.1/mels/LJ042-0129.pt|No night clubs or bowling alleys, no places of recreation except the trade union dances. I have had enough.
LJSpeech-1.1/mels/LJ036-0103.pt|The police asked him whether he could pick out his passenger from the lineup.
LJSpeech-1.1/mels/LJ046-0058.pt|During his Presidency, Franklin D. Roosevelt made almost four hundred journeys and traveled more than three hundred fifty thousand miles.
LJSpeech-1.1/mels/LJ014-0076.pt|He was seen afterwards smoking and talking with his hosts in their back parlor, and never seen again alive.
LJSpeech-1.1/mels/LJ002-0043.pt|long narrow rooms -- one thirty-six feet, six twenty-three feet, and the eighth eighteen,
LJSpeech-1.1/mels/LJ009-0076.pt|We come to the sermon.
LJSpeech-1.1/mels/LJ017-0131.pt|even when the high sheriff had told him there was no possibility of a reprieve, and within a few hours of execution.
LJSpeech-1.1/mels/LJ046-0184.pt|but there is a system for the immediate notification of the Secret Service by the confining institution when a subject is released or escapes.
LJSpeech-1.1/mels/LJ014-0263.pt|When other pleasures palled he took a theatre, and posed as a munificent patron of the dramatic art.
LJSpeech-1.1/mels/LJ042-0096.pt|(old exchange rate) in addition to his factory salary of approximately equal amount
LJSpeech-1.1/mels/LJ049-0050.pt|Hill had both feet on the car and was climbing aboard to assist President and Mrs. Kennedy.
LJSpeech-1.1/mels/LJ019-0186.pt|seeing that since the establishment of the Central Criminal Court, Newgate received prisoners for trial from several counties,
LJSpeech-1.1/mels/LJ028-0307.pt|then let twenty days pass, and at the end of that time station near the Chaldasan gates a body of four thousand.
LJSpeech-1.1/mels/LJ012-0235.pt|While they were in a state of insensibility the murder was committed.
LJSpeech-1.1/mels/LJ034-0053.pt|reached the same conclusion as Latona that the prints found on the cartons were those of Lee Harvey Oswald.
LJSpeech-1.1/mels/LJ014-0030.pt|These were damnatory facts which well supported the prosecution.
LJSpeech-1.1/mels/LJ015-0203.pt|but were the precautions too minute, the vigilance too close to be eluded or overcome?
LJSpeech-1.1/mels/LJ028-0093.pt|but his scribe wrote it in the manner customary for the scribes of those days to write of their royal masters.
LJSpeech-1.1/mels/LJ002-0018.pt|The inadequacy of the jail was noticed and reported upon again and again by the grand juries of the city of London,
LJSpeech-1.1/mels/LJ028-0275.pt|At last, in the twentieth month,
LJSpeech-1.1/mels/LJ012-0042.pt|which he kept concealed in a hiding-place with a trap-door just under his bed.
LJSpeech-1.1/mels/LJ011-0096.pt|He married a lady also belonging to the Society of Friends, who brought him a large fortune, which, and his own money, he put into a city firm,
LJSpeech-1.1/mels/LJ036-0077.pt|Roger D. Craig, a deputy sheriff of Dallas County,
LJSpeech-1.1/mels/LJ016-0318.pt|Other officials, great lawyers, governors of prisons, and chaplains supported this view.
LJSpeech-1.1/mels/LJ013-0164.pt|who came from his room ready dressed, a suspicious circumstance, as he was always late in the morning.
LJSpeech-1.1/mels/LJ027-0141.pt|is closely reproduced in the life-history of existing deer. Or, in other words,
LJSpeech-1.1/mels/LJ028-0335.pt|accordingly they committed to him the command of their whole army, and put the keys of their city into his hands.
LJSpeech-1.1/mels/LJ031-0202.pt|Mrs. Kennedy chose the hospital in Bethesda for the autopsy because the President had served in the Navy.
LJSpeech-1.1/mels/LJ021-0145.pt|From those willing to join in establishing this hoped-for period of peace,
LJSpeech-1.1/mels/LJ016-0288.pt|"Müller, Müller, He's the man," till a diversion was created by the appearance of the gallows, which was received with continuous yells.
LJSpeech-1.1/mels/LJ028-0081.pt|Years later, when the archaeologists could readily distinguish the false from the true,
LJSpeech-1.1/mels/LJ018-0081.pt|his defense being that he had intended to commit suicide, but that, on the appearance of this officer who had wronged him,
LJSpeech-1.1/mels/LJ021-0066.pt|together with a great increase in the payrolls, there has come a substantial rise in the total of industrial profits
LJSpeech-1.1/mels/LJ009-0238.pt|After this the sheriffs sent for another rope, but the spectators interfered, and the man was carried back to jail.
LJSpeech-1.1/mels/LJ005-0079.pt|and improve the morals of the prisoners, and shall insure the proper measure of punishment to convicted offenders.
LJSpeech-1.1/mels/LJ035-0019.pt|drove to the northwest corner of Elm and Houston, and parked approximately ten feet from the traffic signal.
LJSpeech-1.1/mels/LJ036-0174.pt|This is the approximate time he entered the roominghouse, according to Earlene Roberts, the housekeeper there.
LJSpeech-1.1/mels/LJ046-0146.pt|The criteria in effect prior to November twenty-two, nineteen sixty-three, for determining whether to accept material for the PRS general files
LJSpeech-1.1/mels/LJ017-0044.pt|and the deepest anxiety was felt that the crime, if crime there had been, should be brought home to its perpetrator.
LJSpeech-1.1/mels/LJ017-0070.pt|but his sporting operations did not prosper, and he became a needy man, always driven to desperate straits for cash.
LJSpeech-1.1/mels/LJ014-0020.pt|He was soon afterwards arrested on suspicion, and a search of his lodgings brought to light several garments saturated with blood;
LJSpeech-1.1/mels/LJ016-0020.pt|He never reached the cistern, but fell back into the yard, injuring his legs severely.
LJSpeech-1.1/mels/LJ045-0230.pt|when he was finally apprehended in the Texas Theatre. Although it is not fully corroborated by others who were present,
LJSpeech-1.1/mels/LJ035-0129.pt|and she must have run down the stairs ahead of Oswald and would probably have seen or heard him.
LJSpeech-1.1/mels/LJ008-0307.pt|afterwards express a wish to murder the Recorder for having kept them so long in suspense.
LJSpeech-1.1/mels/LJ008-0294.pt|nearly indefinitely deferred.
LJSpeech-1.1/mels/LJ047-0148.pt|On October twenty-five,
LJSpeech-1.1/mels/LJ008-0111.pt|They entered a "stone cold room," and were presently joined by the prisoner.
LJSpeech-1.1/mels/LJ034-0042.pt|that he could only testify with certainty that the print was less than three days old.
LJSpeech-1.1/mels/LJ037-0234.pt|Mrs. Mary Brock, the wife of a mechanic who worked at the station, was there at the time and she saw a white male,
LJSpeech-1.1/mels/LJ040-0002.pt|Chapter seven. Lee Harvey Oswald: Background and Possible Motives, Part one.
LJSpeech-1.1/mels/LJ045-0140.pt|The arguments he used to justify his use of the alias suggest that Oswald may have come to think that the whole world was becoming involved
LJSpeech-1.1/mels/LJ012-0035.pt|the number and names on watches, were carefully removed or obliterated after the goods passed out of his hands.
LJSpeech-1.1/mels/LJ012-0250.pt|On the seventh July, eighteen thirty-seven,
LJSpeech-1.1/mels/LJ016-0179.pt|contracted with sheriffs and conveners to work by the job.
LJSpeech-1.1/mels/LJ016-0138.pt|at a distance from the prison.
LJSpeech-1.1/mels/LJ027-0052.pt|These principles of homology are essential to a correct interpretation of the facts of morphology.
LJSpeech-1.1/mels/LJ031-0134.pt|On one occasion Mrs. Johnson, accompanied by two Secret Service agents, left the room to see Mrs. Kennedy and Mrs. Connally.
LJSpeech-1.1/mels/LJ019-0273.pt|which Sir Joshua Jebb told the committee he considered the proper elements of penal discipline.
LJSpeech-1.1/mels/LJ014-0110.pt|At the first the boxes were impounded, opened, and found to contain many of O'Connor's effects.
LJSpeech-1.1/mels/LJ034-0160.pt|on Brennan's subsequent certain identification of Lee Harvey Oswald as the man he saw fire the rifle.
LJSpeech-1.1/mels/LJ038-0199.pt|eleven. If I am alive and taken prisoner,
LJSpeech-1.1/mels/LJ014-0010.pt|yet he could not overcome the strange fascination it had for him, and remained by the side of the corpse till the stretcher came.
LJSpeech-1.1/mels/LJ033-0047.pt|I noticed when I went out that the light was on, end quote,
LJSpeech-1.1/mels/LJ040-0027.pt|He was never satisfied with anything.
LJSpeech-1.1/mels/LJ048-0228.pt|and others who were present say that no agent was inebriated or acted improperly.
LJSpeech-1.1/mels/LJ003-0111.pt|He was in consequence put out of the protection of their internal law, end quote. Their code was a subject of some curiosity.
LJSpeech-1.1/mels/LJ008-0258.pt|Let me retrace my steps, and speak more in detail of the treatment of the condemned in those bloodthirsty and brutally indifferent days,
LJSpeech-1.1/mels/LJ029-0022.pt|The original plan called for the President to spend only one day in the State, making whirlwind visits to Dallas, Fort Worth, San Antonio, and Houston.
LJSpeech-1.1/mels/LJ004-0045.pt|Mr. Sturges Bourne, Sir James Mackintosh, Sir James Scarlett, and William Wilberforce.
|
PyTorch/Classification/ConvNets/scripts | scripts | rnxt_partial | FLAGS=$1
STAGE_ID=$2
STAGE_LEN=$3
python ./multiproc.py \
--nproc_per_node 8 \
./main.py /imagenet \
-j5 -p 100 \
--data-backend pytorch \
--raport-file report_$STAGE_ID.json \
--lr 1.024 \
--batch-size 128 \
--optimizer-batch-size 1024 \
--static-loss-scale 128 \
--warmup 8 \
--arch resnext101-32x4d -c fanin \
--label-smoothing 0.1 \
--lr-schedule cosine \
--mom 0.875 \
--wd 6.103515625e-05 \
--workspace /results \
--epochs 90 \
--run-epochs $STAGE_LEN \
$FLAGS \
--resume /results/checkpoint_$( expr $STAGE_ID - 1).pth.tar \
--checkpoint checkpoint_$STAGE_ID.pth.tar
|
PyTorch/SpeechSynthesis/HiFiGAN/scripts | scripts | train_benchmark | #!/usr/bin/env bash
export CUDNN_V8_API_ENABLED=1 # Keep the flag for older containers
export TORCH_CUDNN_V8_API_ENABLED=1
set -a
: ${RESUME:=false}
: ${AMP:=false}
: ${BATCH_SIZE:=16}
: ${NUM_GPUS:=8} # 1 4 8
: ${OUTPUT_DIR:="./results/perf-train"}
: ${EPOCHS:=1000000} # Prevents from saving a final checkpoint
: ${EPOCHS_THIS_JOB:=50}
: ${BMARK_EPOCHS_NUM:=40}
: ${VAL_INTERVAL:=100000} # In num of epochs
: ${SAMPLES_INTERVAL:=100000} # In num of epochs
: ${CHECKPOINT_INTERVAL:=100000} # In num of epochs
GRAD_ACCUMULATION=$((128 / $BATCH_SIZE / $NUM_GPUS ))
LOG_FILE=$OUTPUT_DIR/perf-train_amp-${AMP}_${NUM_GPUS}x${BATCH_SIZE}x${GRAD_ACCUMULATION}
LOG_FILE+=.json
bash scripts/train_lj22khz.sh "$@"
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/configuration | configuration | utils | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable
def optional_comparison(optional, value):
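    """Return True when `optional` is None (treated as a wildcard) or when it equals `value`."""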
if optional is None:
return True
return optional == value
def one_field_from_list_of_dicts(dicts: Iterable[Dict], field: str, res_aggregator=list):
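    """Collect `field` from every dict in `dicts` that defines it, aggregated with `res_aggregator` (list by default)."""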
return res_aggregator(d[field] for d in dicts if field in d)
|
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools | dataset_tools | create_pet_tf_record | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert the Oxford pet dataset to TFRecord for object_detection.
See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar
Cats and Dogs
IEEE Conference on Computer Vision and Pattern Recognition, 2012
http://www.robots.ox.ac.uk/~vgg/data/pets/
Example usage:
python object_detection/dataset_tools/create_pet_tf_record.py \
--data_dir=/home/user/pet \
--output_dir=/home/user/pet/output
"""
import hashlib
import io
import logging
import os
import random
import re
import contextlib2
from lxml import etree
import numpy as np
import PIL.Image
import tensorflow as tf
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')
flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')
flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes '
'for pet faces. Otherwise generates bounding boxes (as '
'well as segmentations for full pet bodies). Note that '
'in the latter case, the resulting files are much larger.')
flags.DEFINE_string('mask_type', 'png', 'How to represent instance '
'segmentation masks. Options are "png" or "numerical".')
flags.DEFINE_integer('num_shards', 10, 'Number of TFRecord shards')
FLAGS = flags.FLAGS
def get_class_name_from_filename(file_name):
"""Gets the class name from a file.
Args:
file_name: The file name to get the class name from.
ie. "american_pit_bull_terrier_105.jpg"
Returns:
A string of the class name.
"""
match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I)
return match.groups()[0]
def dict_to_tf_example(data,
mask_path,
label_map_dict,
image_subdirectory,
ignore_difficult_instances=False,
faces_only=True,
mask_type='png'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
mask_path: String path to PNG encoded mask.
label_map_dict: A map from string label names to integers ids.
image_subdirectory: String specifying subdirectory within the
Pascal dataset directory holding the actual image data.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(image_subdirectory, data['filename'])
with tf.gfile.GFile(img_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
with tf.gfile.GFile(mask_path, 'rb') as fid:
encoded_mask_png = fid.read()
encoded_png_io = io.BytesIO(encoded_mask_png)
mask = PIL.Image.open(encoded_png_io)
if mask.format != 'PNG':
raise ValueError('Mask format not PNG')
mask_np = np.asarray(mask)
nonbackground_indices_x = np.any(mask_np != 2, axis=0)
nonbackground_indices_y = np.any(mask_np != 2, axis=1)
nonzero_x_indices = np.where(nonbackground_indices_x)
nonzero_y_indices = np.where(nonbackground_indices_y)
width = int(data['size']['width'])
height = int(data['size']['height'])
xmins = []
ymins = []
xmaxs = []
ymaxs = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
masks = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
if faces_only:
xmin = float(obj['bndbox']['xmin'])
xmax = float(obj['bndbox']['xmax'])
ymin = float(obj['bndbox']['ymin'])
ymax = float(obj['bndbox']['ymax'])
else:
xmin = float(np.min(nonzero_x_indices))
xmax = float(np.max(nonzero_x_indices))
ymin = float(np.min(nonzero_y_indices))
ymax = float(np.max(nonzero_y_indices))
xmins.append(xmin / width)
ymins.append(ymin / height)
xmaxs.append(xmax / width)
ymaxs.append(ymax / height)
class_name = get_class_name_from_filename(data['filename'])
classes_text.append(class_name.encode('utf8'))
classes.append(label_map_dict[class_name])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
if not faces_only:
mask_remapped = (mask_np != 2).astype(np.uint8)
masks.append(mask_remapped)
feature_dict = {
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}
if not faces_only:
if mask_type == 'numerical':
mask_stack = np.stack(masks).astype(np.float32)
masks_flattened = np.reshape(mask_stack, [-1])
feature_dict['image/object/mask'] = (
dataset_util.float_list_feature(masks_flattened.tolist()))
elif mask_type == 'png':
encoded_mask_png_list = []
for mask in masks:
img = PIL.Image.fromarray(mask)
output = io.BytesIO()
img.save(output, format='PNG')
encoded_mask_png_list.append(output.getvalue())
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png_list))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
def create_tf_record(output_filename,
num_shards,
label_map_dict,
annotations_dir,
image_dir,
examples,
faces_only=True,
mask_type='png'):
"""Creates a TFRecord file from examples.
Args:
output_filename: Path to where output file is saved.
num_shards: Number of shards for output file.
label_map_dict: The label map dictionary.
annotations_dir: Directory where annotation files are stored.
image_dir: Directory where image files are stored.
examples: Examples to parse and save to tf record.
faces_only: If True, generates bounding boxes for pet faces. Otherwise
generates bounding boxes (as well as segmentations for full pet bodies).
mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
smaller file sizes.
"""
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, output_filename, num_shards)
for idx, example in enumerate(examples):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples))
xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')
if not os.path.exists(xml_path):
logging.warning('Could not find %s, ignoring example.', xml_path)
continue
with tf.gfile.GFile(xml_path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
try:
tf_example = dict_to_tf_example(
data,
mask_path,
label_map_dict,
image_dir,
faces_only=faces_only,
mask_type=mask_type)
if tf_example:
shard_idx = idx % num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
except ValueError:
logging.warning('Invalid example: %s, ignoring.', xml_path)
# TODO(derekjchow): Add test for pet/PASCAL main files.
def main(_):
data_dir = FLAGS.data_dir
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
logging.info('Reading from Pet dataset.')
image_dir = os.path.join(data_dir, 'images')
annotations_dir = os.path.join(data_dir, 'annotations')
examples_path = os.path.join(annotations_dir, 'trainval.txt')
examples_list = dataset_util.read_examples_list(examples_path)
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
random.seed(42)
random.shuffle(examples_list)
num_examples = len(examples_list)
num_train = int(0.7 * num_examples)
train_examples = examples_list[:num_train]
val_examples = examples_list[num_train:]
logging.info('%d training and %d validation examples.',
len(train_examples), len(val_examples))
train_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_train.record')
val_output_path = os.path.join(FLAGS.output_dir, 'pet_faces_val.record')
if not FLAGS.faces_only:
train_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_train.record')
val_output_path = os.path.join(FLAGS.output_dir,
'pets_fullbody_with_masks_val.record')
create_tf_record(
train_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
train_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
create_tf_record(
val_output_path,
FLAGS.num_shards,
label_map_dict,
annotations_dir,
image_dir,
val_examples,
faces_only=FLAGS.faces_only,
mask_type=FLAGS.mask_type)
if __name__ == '__main__':
tf.app.run()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda | bermuda | onnx | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
        # TODO: modification of the ONNX model inputs/outputs probably causes an error during optimization
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
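# A minimal, hypothetical usage sketch of the classes above (not called anywhere
# in this module). The model path is a placeholder string; inputs are zero-filled
# from each TensorSpec, assuming numpy-compatible dtype strings and replacing
# dynamic (-1 or named) dimensions with 1 just to build a feed dict.
def _example_onnx_inference(model_path):
    import numpy as np  # local import, only needed by this sketch
    model = OnnxLoader().load(model_path)
    with OnnxRunner().init_inference(model) as session:
        feed = {
            name: np.zeros([d if isinstance(d, int) and d > 0 else 1 for d in spec.shape], dtype=spec.dtype)
            for name, spec in model.inputs.items()
        }
        outputs = session(feed)
    OnnxSaver().save(model, model_path + ".copy.onnx", dataloader_fn=None)
    return outputs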
|
PyTorch/Classification/GPUNet/triton/runner | runner | preparer | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
        Clean logs and results from the previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
            triton_container_image: Triton Inference Server container image used for tests
            logs_dir: Path to directory where run logs are stored
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = {}
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = []
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
ensemble_model_name=config.ensemble_model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
batching=config.batching,
measurement_steps_offline=config.measurement_steps_offline,
measurement_steps_online=config.measurement_steps_online,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = []
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
Prepare experiments data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
results_mapped = {}
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = {}
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=configuration.parameters,
stages=stages_mapped,
results=results_mapped,
checkpoint=configuration.checkpoint,
)
return experiment
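# Hypothetical wiring sketch (not part of the original module) showing how a
# runner could drive the preparer; the config, pipeline, maintainer and triton
# objects are assumed to be constructed elsewhere by the runner's executor.
def _example_prepare(workspace, config, pipeline, maintainer, triton, logs_dir):
    preparer = ExperimentPreparer()
    task = preparer.exec(
        workspace=workspace,
        config=config,
        pipeline=pipeline,
        maintainer=maintainer,
        triton=triton,
        logs_dir=logs_dir,
    )
    LOGGER.info(f"Prepared task with {len(task.experiments)} experiment(s)")
    return task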
|
PyTorch/Detection/Efficientdet/effdet/layers | layers | create_conv2d | """ Create Conv2d Factory Method
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mixed_conv2d import MixedConv2d
from .cond_conv2d import CondConv2d
from .conv2d_same import create_conv2d_pad
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
assert 'groups' not in kwargs # MixedConv groups are defined by kernel list
        # Only lists are used to define MixedConv2d kernel groups; ints, tuples, and
        # other iterables still go to a normal conv and specify the (h, w) kernel size.
m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
groups = out_channels if depthwise else kwargs.pop('groups', 1)
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
    return m
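# Hypothetical usage sketch (not part of the original module): an int kernel_size
# builds a regular padded conv, a list of kernel sizes selects MixedConv2d, and
# depthwise=True sets groups equal to the output channel count.
def _example_create_conv2d():
    regular = create_conv2d(32, 64, kernel_size=3, stride=1)
    depthwise = create_conv2d(64, 64, kernel_size=3, depthwise=True)
    mixed = create_conv2d(64, 128, kernel_size=[3, 5, 7])
    return regular, depthwise, mixed
|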
TensorFlow/Translation/GNMT | GNMT | .gitignore | __pycache__
*.log
/results
/data
.DS_Store
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/triton | triton | tacotron2_ts-script_config | name: "tacotron2-ts-script"
platform: "pytorch_libtorch"
max_batch_size: 1
input [
{
name: "input__0"
data_type: TYPE_INT64
dims: [-1]
},
{
name: "input__1"
data_type: TYPE_INT64
dims: [1]
reshape: { shape: [ ] }
}
]
output [
{
name: "output__0"
data_type: TYPE_FP16
dims: [80, -1]
},
{
name: "output__1"
data_type: TYPE_INT32
dims: [1]
reshape: { shape: [ ] }
},
{
name: "output__2"
data_type: TYPE_FP16
dims: [-1, -1]
}
]
optimization {
cuda {
graphs: 1
}
}
|
PyTorch/Classification/ConvNets/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
class Accelerator(Parameter):
AMP = "amp"
CUDA = "cuda"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
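# Hypothetical helper (not used by this module) illustrating the intended use of
# load_from_file: fetch the `get_dataloader_fn` entry point from a user-provided
# script; the script path is supplied by the caller.
def _example_load_dataloader_fn(script_path):
    get_dataloader_fn = load_from_file(script_path, label="dataloader", target=DATALOADER_FN_NAME)
    if get_dataloader_fn is None:
        raise RuntimeError(f"{script_path} does not define {DATALOADER_FN_NAME}")
    return get_dataloader_fn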
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
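# A minimal, hypothetical subclass (not part of the toolkit) illustrating the
# BaseMetricsCalculator contract: it computes top-1 accuracy from a "logits"
# model output and a "labels" ground-truth entry; both tensor names are assumptions.
class _ExampleAccuracyCalculator(BaseMetricsCalculator):
    def calc(self, *, ids, y_pred, x, y_real):
        top1 = np.argmax(y_pred["logits"], axis=-1)
        labels = np.asarray(y_real["labels"]).reshape(top1.shape)
        return {"accuracy": float(np.mean(top1 == labels))}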
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/caffe2 | caffe2 | e2e_faster_rcnn_X_101_32x8d_FPN_1x_caffe2 | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://Caffe2Detectron/COCO/36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x"
BACKBONE:
CONV_BODY: "R-101-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
RESNETS:
STRIDE_IN_1X1: False
NUM_GROUPS: 32
WIDTH_PER_GROUP: 8
DATASETS:
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/denoiser | denoiser | denoiserStreamingInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_DENOISERSTREAMINGINSTANCE_H
#define TT2I_DENOISERSTREAMINGINSTANCE_H
#include "binding.h"
#include "engineDriver.h"
#include "timedObject.h"
namespace nvinfer1
{
class ICudaEngine;
class IExecutionContext;
} // namespace nvinfer1
namespace tts
{
class DenoiserStreamingInstance : public TimedObject, public EngineDriver
{
public:
/**
* @brief Tensor of shape {1 x INPUT_LENGTH}
*/
static constexpr const char* const INPUT_NAME = "input_denoiser";
/**
* @brief Tensor of shape {1 x OUTPUT_LENGTH}
*/
static constexpr const char* const OUTPUT_NAME = "output_denoiser";
    /**
     * @brief Create a new streaming denoiser instance.
     *
     * @param engine The built TensorRT engine for the denoiser.
     */
DenoiserStreamingInstance(TRTPtr<nvinfer1::ICudaEngine>&& engine);
/**
* @brief Start a new session for performing streaming inference. This
* method should be called before the first call to `inferNext()`.
*/
void startInference();
/**
* @brief Perform inference on a chunk of input.
*
* @param batchSize The size of the batch to process.
* @param inputDevice The input tensor on the device.
* @param outputDevice The output tensor on the device.
* @param stream The stream to operate on.
*/
void inferNext(const int batchSize, const float* inputDevice, float* outputDevice, cudaStream_t stream);
/**
* @brief Get the size of the chunk the denoiser will process.
*
* @return The size of the chunk.
*/
int getChunkSize() const
{
return mChunkSize;
}
private:
Binding mBinding;
TRTPtr<nvinfer1::IExecutionContext> mContext;
int mChunkSize;
};
} // namespace tts
#endif
|
PyTorch/Detection/Efficientdet/scripts/docker | docker | launch | #!/bin/bash
PATH_TO_COCO=$1
MOUNT_LOCATION='/datasets/data'
NAME='detectron2_interactive'
docker run --runtime=nvidia --cap-add=SYS_PTRACE --cap-add SYS_ADMIN --cap-add DAC_READ_SEARCH \
    --security-opt seccomp=unconfined \
    -v /efficientdet-pytorch:/workspace/object_detection \
    -v /effdet/backbone_checkpoints:/backbone_checkpoints \
    -v /effdet/checkpoints:/checkpoints \
    -v /coco2017/:/workspace/object_detection/datasets/coco \
    -v /waymo_2D_object_detection/raw/:/workspace/object_detection/datasets/waymo \
    --rm --name=$NAME --shm-size=30g --ulimit memlock=-1 --ulimit stack=67108864 --ipc=host \
    -t -i nvcr.io/nvidia/effdet:21.06-py3-stage bash
|