relative_path | section | filename | text
---|---|---|---|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_mobilenet_v1_quantized_300x300_coco14_sync | # SSD with Mobilenet v1 with quantized training.
# Trained on COCO, initialized from Imagenet classification checkpoint
# Achieves 18.2 mAP on coco14 minival dataset.
# This config is TPU compatible
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: false
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.2
max_scale: 0.95
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
}
}
image_resizer {
fixed_shape_resizer {
height: 300
width: 300
}
}
box_predictor {
convolutional_box_predictor {
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.8
kernel_size: 1
box_code_size: 4
apply_sigmoid_to_scores: false
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
center: true,
decay: 0.97,
epsilon: 0.001,
}
}
}
}
feature_extractor {
type: 'ssd_mobilenet_v1'
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
center: true,
decay: 0.97,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.75,
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
batch_size: 128
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 50000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .2
total_steps: 50000
warmup_learning_rate: 0.06
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
graph_rewriter {
quantization {
delay: 48000
activation_bits: 8
weight_bits: 8
}
}
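# Illustration (added note, not part of the original config): with the TF Object
# Detection API, a config like this is typically consumed by the training binary,
# e.g. (paths are placeholders):
#   python object_detection/model_main.py \
#     --pipeline_config_path=samples/configs/ssd_mobilenet_v1_quantized_300x300_coco14_sync.config \
#     --model_dir=/tmp/ssd_mnv1_quant
# The graph_rewriter block above enables quantization-aware training after
# `delay` (48000) steps, simulating 8-bit weights and activations.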
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime | runtime | evaluation | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to perform COCO evaluation."""
import numpy as np
from mrcnn_tf2.utils import coco_utils, coco_metric
def process_predictions(predictions):
""" Process the model predictions for COCO eval.
Converts boxes from [y1, x1, y2, x2] to [x1, y1, w, h] and scales them by image scale.
Flattens source_ids
Args:
predictions (dict): Predictions returned by model
Returns:
Converted prediction.
"""
image_info = predictions['image_info']
detection_boxes = predictions['detection_boxes']
for pred_id, box_id in np.ndindex(*detection_boxes.shape[:2]):
# convert from [y1, x1, y2, x2] to [x1, y1, w, h] * scale
scale = image_info[pred_id, 2]
y1, x1, y2, x2 = detection_boxes[pred_id, box_id, :]
new_box = np.array([x1, y1, x2 - x1, y2 - y1]) * scale
detection_boxes[pred_id, box_id, :] = new_box
# flatten source ids
predictions['source_ids'] = predictions['source_ids'].flatten()
return predictions
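# Worked example (illustration only, not part of the original module): with an
# image scale of 2.0, a detection box [y1, x1, y2, x2] = [10., 20., 30., 60.]
# becomes [x1, y1, w, h] * scale = [40., 20., 80., 40.] in COCO format.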
def evaluate(predictions, eval_file=None, include_mask=True):
""" Evaluates given iterable of predictions.
Args:
predictions (Iterable): Iterable of predictions returned by the model.
eval_file (Optional(str)): Path to file with eval annotations.
If None then groundtruth from features will be used.
include_mask (bool): Indicates if eval mask should be included.
Returns:
Evaluation metrics produced by the COCO metric.
"""
# convert from [y1, x1, y2, x2] to [x1, y1, w, h] * scale
predictions = process_predictions(predictions)
# create evaluation metric
eval_metric = coco_metric.EvaluationMetric(filename=eval_file, include_mask=include_mask)
# eval using the file or groundtruth from features
if eval_file is not None:
eval_results = eval_metric.predict_metric_fn(predictions)
else:
images, annotations = coco_utils.extract_coco_groundtruth(predictions, include_mask)
coco_dataset = coco_utils.create_coco_format_dataset(images, annotations)
eval_results = eval_metric.predict_metric_fn(predictions, groundtruth_data=coco_dataset)
return eval_results
|
TensorFlow/Detection/SSD/models/research/slim/scripts | scripts | finetune_inception_v1_on_flowers | #!/bin/bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script performs the following operations:
# 1. Downloads the Flowers dataset
# 2. Fine-tunes an InceptionV1 model on the Flowers training set.
# 3. Evaluates the model on the Flowers validation set.
#
# Usage:
# cd slim
# ./scripts/finetune_inception_v1_on_flowers.sh
set -e
# Where the pre-trained InceptionV1 checkpoint is saved to.
PRETRAINED_CHECKPOINT_DIR=/tmp/checkpoints
# Where the training (fine-tuned) checkpoint and logs will be saved to.
TRAIN_DIR=/tmp/flowers-models/inception_v1
# Where the dataset is saved to.
DATASET_DIR=/tmp/flowers
# Download the pre-trained checkpoint.
if [ ! -d "$PRETRAINED_CHECKPOINT_DIR" ]; then
mkdir ${PRETRAINED_CHECKPOINT_DIR}
fi
if [ ! -f ${PRETRAINED_CHECKPOINT_DIR}/inception_v1.ckpt ]; then
wget http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz
tar -xvf inception_v1_2016_08_28.tar.gz
mv inception_v1.ckpt ${PRETRAINED_CHECKPOINT_DIR}/inception_v1.ckpt
rm inception_v1_2016_08_28.tar.gz
fi
# Download the dataset
python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir=${DATASET_DIR}
# Fine-tune only the new layers for 3000 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v1 \
--checkpoint_path=${PRETRAINED_CHECKPOINT_DIR}/inception_v1.ckpt \
--checkpoint_exclude_scopes=InceptionV1/Logits \
--trainable_scopes=InceptionV1/Logits \
--max_number_of_steps=3000 \
--batch_size=32 \
--learning_rate=0.01 \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=100 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR} \
--eval_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v1
# Fine-tune all layers for 1000 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--checkpoint_path=${TRAIN_DIR} \
--model_name=inception_v1 \
--max_number_of_steps=1000 \
--batch_size=32 \
--learning_rate=0.001 \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=100 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR}/all \
--eval_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v1
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer | maintainer | maintainer_factory | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .docker.maintainer import DockerMaintainer
class MaintainerFactory:
@staticmethod
def create_docker_maintainer():
return DockerMaintainer()
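# Usage sketch (illustration only, not part of the original module): the factory
# currently exposes a single Docker-based maintainer, e.g.
#   maintainer = MaintainerFactory.create_docker_maintainer()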
|
PyTorch/SpeechRecognition/wav2vec2/wav2vec2 | wav2vec2 | arg_parser | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def populate(parser):
choices = ["pretrain", "finetune"]
parser.add_argument("mode", help="Training mode", choices=choices)
mode = parser.parse_args([a for a in sys.argv[1:] if a in choices]).mode
if mode == "pretrain":
populate_pretraining(parser)
else:
populate_finetuning(parser)
populate_common(parser)
return parser
def populate_infer(parser):
populate_finetuning(parser)
populate_common(parser)
_populate_infer(parser)
return parser
def populate_common(parser):
train = parser.add_argument_group("training setup")
train.add_argument("--epochs_this_job", default=0, type=int,
help="Run for a number of epochs and exit")
train.add_argument("--cudnn_benchmark", action="store_true",
help="Enable cudnn benchmark")
train.add_argument("--local_rank", "--local-rank", default=os.getenv("LOCAL_RANK", 0),
type=int, help="GPU id used for distributed training")
optim = parser.add_argument_group("optimization setup")
optim.add_argument("--optimizer", default="adam", type=str,
help="Optimization algorithm")
optim.add_argument("--ema", type=float, default=0.0,
help="Discount factor for EMA of model weights")
io = parser.add_argument_group("feature and checkpointing setup")
io.add_argument("--log_frequency", default=1, type=int,
help="Number of steps between printing training stats")
io.add_argument("--output_dir", type=str, required=True,
help="Directory for logs and checkpoints")
io.add_argument("--log_file", type=str, default=None,
help="Path to save the training logfile.")
io.add_argument("--benchmark_epochs_num", type=int, default=3,
help="Number of last epochs to calculate throughput stats")
ckpt = parser.add_argument_group("checkpoint")
ckpt.add_argument("--no_save", action="store_true",
help="Don't save models or checkpoints")
ckpt.add_argument("--resume", action="store_true",
help="Try to resume from last saved checkpoint")
ckpt.add_argument("--ckpt", default=None, type=str,
help="Path to a checkpoint for resuming training")
ckpt.add_argument("--save_frequency", default=10, type=int,
help="Checkpoint saving frequency in epochs")
ckpt.add_argument("--keep_milestones", default=[100, 200, 300, 400],
type=int, nargs="+",
help="Milestone checkpoints to keep from removing")
# io.add_argument("--save_best_from", default=380, type=int,
# help="Epoch on which to begin tracking best checkpoint (dev WER)")
common = parser.add_argument_group("common")
common.add_argument("--seed", type=int, default=1,
help="Pseudo random number generator seed")
common.add_argument("--cpu", action="store_true",
help="Use CPU instead of CUDA")
common.add_argument("--amp", action="store_true",
help="Use automatic mixed precision")
common.add_argument("--fp16", action="store_true",
help="If fp16 is being used")
common.add_argument("--bf16", action="store_true",
help="Train in bfloat16 precision")
common.add_argument("--min_loss_scale", type=float, default=0.0001,
help="Minimum FP16/AMP loss scale, after which "
"training is stopped")
common.add_argument("--fp16_init_scale", type=int, default=128,
help="Default FP16 loss scale")
common.add_argument("--fp32_transformer_layernorm", action="store_true",
help="Calculate MHA LayerNorms in full precision")
common.add_argument("--fp32_mha_softmax", action="store_true",
help="Calculate multi-head attention to FP32")
common.add_argument("--fp32_cosine_sim", action="store_true",
help="Calculate cosine similarity in FP32")
common.add_argument("--fp32_pos_conv", action="store_true",
help="Calculate positional conv in FP32")
common.add_argument("--fp32_conv_norms", action="store_true",
help="Calculate normalization in conv layers in FP32")
common.add_argument("--mha", type=str, default="fairseq",
choices=["fairseq", "pyt"], help="MHA implementation")
common.add_argument("--num_concat_batches", type=int, default=1)
dataset = parser.add_argument_group("dataset")
dataset.add_argument("--num_workers", type=int, default=6,
help="How many subprocesses to use for data loading")
dataset.add_argument("--skip_invalid_size_inputs_valid_test",
action="store_true",
help="Ignore too long or too short lines in valid and"
" test set")
dataset.add_argument("--max_tokens", type=int, default=1400000,
help="Maximum number of tokens in a batch")
dataset.add_argument("--max_tokens_valid", type=int, default=1400000,
help="Maximum number of tokens in a validation batch "
"(defaults to --max-tokens)")
dataset.add_argument("--required_batch_size_multiple", type=int, default=8,
help="Batch size will be a multiplier of this value")
dataset.add_argument("--required_seq_len_multiple", type=int, default=2,
help="Pad the input to encoder such that the sequence"
" length is divisible by multiple")
dataset.add_argument("--train_subset", type=str, default="train",
help="Data subset to use for training (e.g. train, "
"valid, test)")
dataset.add_argument("--valid_subset", type=str, default="valid",
help="Comma separated list of data subsets to use for"
" validation (e.g. train, valid, test)")
dataset.add_argument("--batch_size", type=int, default=None,
help="Number of examples in a batch")
dataset.add_argument("--batch_size_valid", type=int, default=None,
help="Batch size of the validation batch (defaults "
"to --batch-size)")
task = parser.add_argument_group("task")
task.add_argument("--data", type=str,
default="/workspace/fairseq/librispeech",
help="Path to data directory")
task.add_argument("--sample_rate", type=int, default=16000,
help="Target sample rate. audio files will be up/down "
"sampled to this rate")
task.add_argument("--enable_padding", action="store_true",
help="Pad shorter samples instead of cropping")
task.add_argument("--min_sample_size", type=int, default=None,
help="Min sample size to crop to for batching")
task.add_argument("--max_sample_size", type=int, default=None,
help="Max sample size to crop to for batching")
task.add_argument("--num_batch_buckets", type=int, default=0,
help="If >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on "
"TPUs to minimize the number of compilations")
opt = parser.add_argument_group("optimization & optimizer")
opt.add_argument("--max_update", type=int, default=400000,
help="Force stop training at specified update")
opt.add_argument("--update_freq", type=int, nargs="+", default=[64],
help="Accumulate grads and update params every N batches")
opt.add_argument("--lr", type=float, nargs="+", default=[0.0005],
help="Max learning rate, must be more than cfg.min_lr")
opt.add_argument("--adam_betas", type=float, nargs="+", default=[0.9, 0.98],
help="Betas for Adam optimizer")
opt.add_argument("--adam_eps", type=float, default=1e-06,
help="Epsilon for Adam optimizer")
opt.add_argument("--weight_decay", type=float, default=0.01,
help="Weight decay")
opt.add_argument("--clip_norm", type=float, default=0.0,
help="Clip threshold of gradients")
sched = parser.add_argument_group("lr_scheduler")
sched.add_argument("--lr_policy", type=str, default="poly",
choices=["poly", "exp"], help="LR decay policy")
sched.add_argument("--warmup_updates", type=int, default=32000,
help="Warmup the learning rate linearly for the first "
"N updates")
sched.add_argument("--hold_updates", type=int, default=0,
help="The number of updates with const learning rate")
sched.add_argument("--initial_lr_scale", type=float, default=0.0,
help="Initial learning rate scale")
sched.add_argument("--final_lr_scale", type=float, default=0.0,
help="Final learning rate scale")
sched.add_argument("--lr_poly_power", type=float, default=1.0,
help="Poly lr policy policy power")
sched.add_argument("--lr_exp_decay", type=float, default=None,
help="Exp lr policy decay factor")
drop = parser.add_argument_group("dropout")
drop.add_argument("--dropout", type=float, default=0.1,
help="Dropout probability for the transformer")
drop.add_argument("--attention_dropout", type=float, default=0.0,
help="Dropout probability for attention weights")
drop.add_argument("--activation_dropout", type=float, default=0.0,
help="Dropout probability after activation in FFN")
drop.add_argument("--dropout_input", type=float, default=0.1,
help="Dropout to apply to the input (after feat extr)")
drop.add_argument("--dropout_features", type=float, default=0.1,
help="Dropout to apply to the features (after feat extr)")
mask = parser.add_argument_group("input masking")
mask.add_argument("--apply_mask", action="store_true",
help="Apply masking during fine-tuning")
mask.add_argument("--mask_length", type=int, default=10,
help="Repeat the mask indices multiple times")
mask.add_argument("--mask_prob", type=float, default=0.5,
help="Probability of replacing a token with mask "
"(normalized by length)")
mask.add_argument("--require_same_masks", type=bool, default=True,
help="Whether to number of masked timesteps must be the"
" same across all examples in a batch")
mask.add_argument("--mask_selection", default="static",
choices=["static", "uniform", "normal", "poisson"],
help="How to choose masks")
mask.add_argument("--mask_other", type=float, default=0,
help="Secondary mask argument (used for more complex "
"distributions), see help in compute_mask_indices")
mask.add_argument("--no_mask_overlap", type=bool, default=False,
help="Whether to allow masks to overlap")
mask.add_argument("--mask_min_space", type=int, default=1,
help="Min space between spans (if no overlap is enabled)")
mask.add_argument("--mask_channel_length", type=int, default=10,
help="Length of the mask for features (channels)")
mask.add_argument("--mask_channel_prob", type=float, default=0.0,
help="Probability of replacing a feature with 0")
mask.add_argument("--mask_channel_before", type=bool, default=False,
help="Apply channel-masking before frequency-masking")
mask.add_argument("--mask_channel_selection", default="static",
choices=["static", "uniform", "normal", "poisson"],
help="How to choose mask length for channel masking")
mask.add_argument("--mask_channel_other", type=float, default=0,
help="Secondary mask argument (used for more complex "
"distributions), see help in compute_mask_indicesh")
mask.add_argument("--no_mask_channel_overlap", type=bool, default=False,
help="Whether to allow channel masks to overlap")
mask.add_argument("--mask_channel_min_space", type=int, default=1,
help="Min space between spans (if no overlap is enabled)")
parser.add_argument("--feature_grad_mult", type=float, default=0.1,
help="Reset feature grad mult in wav2vec 2.0 to this")
# NOTE In Fairseq this is called `--layerdrop` in fine-tuning yamls
parser.add_argument("--encoder_layerdrop", type=float, default=0.05,
help="Probability of dropping a layer in wav2vec 2.0")
mask.add_argument("--mask_dropout", type=float, default=0.0,
help="Percent of masks to unmask for each sample")
def populate_finetuning(parser):
"""Args for fine-tuning, absent from pre-trained ckpts."""
ft = parser.add_argument_group("supervised fine-tuning")
ft.add_argument("--final_dropout", type=float, default=0.0,
help="Dropout after transformer and before final proj")
ft.add_argument("--w2v_path", type=str, default=None,
help="Path to wav2vec 2.0 model")
ft.add_argument("--blank_weight", type=float, default=0)
ft.add_argument("--blank_mode", type=str, default="add")
ft.add_argument("--labels", type=str, default="ltr",
help="Extension of the label file to load for fine-tuning")
ft.add_argument("--freeze_finetune_updates", type=int, default=0,
help="Don't finetune wav2vec for this many updates")
def populate_pretraining(parser):
"""During fine-tuning these parameters will be loaded from a ckpt."""
model = parser.add_argument_group("model")
model.add_argument("--extractor_mode", type=str, default="default",
help="Mode for feature extractor. default has a single "
"group norm with d groups in the first conv block,"
" whereas layer_norm has layer norms in every "
"block (meant to use with normalize=True)")
model.add_argument("--encoder_layers", type=int, default=12,
help="Num encoder layers in the transformer")
model.add_argument("--encoder_embed_dim", type=int, default=768,
help="Encoder embedding dimension")
model.add_argument("--encoder_ffn_embed_dim", type=int, default=3072,
help="Encoder embedding dimension for FFN")
model.add_argument("--encoder_attention_heads", type=int, default=12,
help="Num encoder attention heads")
model.add_argument("--activation_fn", type=str, default="gelu",
help="Activation function to use")
model.add_argument("--final_dim", type=int, default=256,
help="Project final representations and targets to this"
" many dimensions. set to encoder_embed_dim "
"is <= 0")
model.add_argument("--layer_norm_first", action="store_true",
help="Apply layernorm first in the transformer")
model.add_argument("--conv_feature_layers", type=str,
default="[(512,10,5)]+[(512,3,2)]*4+[(512,2,2)]+[(512,2,2)]",
help="String describing convolutional feature "
"extraction layers in form of a python list that "
"contains [(dim, kernel_size, stride), ...]")
model.add_argument("--conv_bias", action="store_true",
help="Include bias in conv encoder")
model.add_argument("--logit_temp", type=float, default=0.1,
help="Temperature to divide logits by")
model.add_argument("--quantize_targets", action="store_true",
help="Use quantized targets")
model.add_argument("--quantize_input", action="store_true",
help="Use quantized inputs")
model.add_argument("--target_glu", action="store_true",
help="Adds projection + glu to targets")
model.add_argument("--quantizer_depth", type=int, default=1,
help="Number of quantizer layers")
model.add_argument("--quantizer_factor", type=int, default=3,
help="Dimensionality increase for inner quantizer "
"layers (if depth > 1)")
model.add_argument("--latent_vars", type=int, default=320,
help="Number of latent variables V in each group of the"
" codebook")
model.add_argument("--latent_groups", type=int, default=2,
help="Number of groups G of latent variables in the "
"codebook")
model.add_argument("--latent_dim", type=int, default=0,
help="If > 0, uses this dimensionality for latent var"
"iables. otherwise uses final_dim / latent_groups")
model.add_argument("--num_negatives", type=int, default=100,
help="Num of sampled negatives")
model.add_argument("--negatives_from_everywhere", action="store_true",
help="Sample negatives from everywhere, not just masked"
" states")
model.add_argument("--cross_sample_negatives", type=int, default=0,
help="Num of cross sampled negatives")
model.add_argument("--codebook_negatives", type=int, default=0,
help="Number of negative examples codebook")
model.add_argument("--conv_pos", type=int, default=128,
help="Number of filters for convolutional positional "
"embeddings")
model.add_argument("--conv_pos_groups", type=int, default=16,
help="Number of groups for convolutional positional "
"embedding")
model.add_argument("--latent_temp", type=float, nargs="+",
default=[2.0, 0.5, 0.999995],
help="Legacy (to be removed)")
model.add_argument("--normalize", action="store_true",
help="If set, normalizes input to have 0 mean and unit "
"variance")
parser.add_argument("--log_keys", type=str, nargs="*",
default=["prob_perplexity", "code_perplexity", "temp"],
help="Additional output keys to log")
crit = parser.add_argument_group("criterion")
crit.add_argument("--infonce", action="store_true",
help="If set, uses cross entropy instead of binary cross"
" entropy (i.e. InfoNCE loss)")
crit.add_argument("--loss_weights", type=float, nargs="*",
default=[0.1, 10.0], help="Weights for the loss terms")
joc = parser.add_argument_group("joc experimental")
joc.add_argument("--use_spectrogram_features", action="store_true",
help="Train on input spectrograms")
joc.add_argument("--rotary_embeddings", action="store_true",
help="Use rotarty embeddings for Transformer layers")
joc.add_argument("--hourglass_transformer", type=str, default=None,
help="Specify the number of layers and shorteining, e.g.,"
" [n_pre,(n_hourglass, shorten_factor),n_post]")
joc.add_argument("--hourglass_resample", type=str, default="naive",
help="Method of up/downsampling in the hourglass model")
joc.add_argument("--spectrogram_feature_stacking", type=int, default=1)
joc.add_argument("--spectrogram_feature_subsampling", type=int, default=1)
joc.add_argument("--spectrogram_window_size", type=float, default=0.02)
joc.add_argument("--spectrogram_window_stride", type=float, default=0.01)
joc.add_argument("--spectrogram_n_filt", type=int, default=80)
return parser
def _populate_infer(parser):
# Fine-tuning only
infer = parser.add_argument_group("inference")
infer.add_argument("--steps", default=0, type=int,
help="Eval this many steps for every worker")
infer.add_argument("--warmup_steps", default=0, type=int,
help="Burn-in period before measuring latencies")
infer.add_argument("--labels_path", type=str, default=None,
help="Path to output labels file, e.g., dict.ltr.txt")
infer.add_argument("--save_predictions", type=str, default=None,
help="Save predictions in text form at this location")
infer.add_argument("--save_logits", default=None, type=str,
help="Save output logits under specified path")
infer.add_argument("--transcribe_wav", type=str,
help="Path to a single .wav file (16KHz)")
infer.add_argument("--transcribe_filelist", type=str,
help="Path to a filelist with one .wav path per line")
infer.add_argument("--torchscript", action="store_true",
help="Evaluate with a TorchScripted model")
infer.add_argument("--w2v_path_for_args", type=str, default=None,
help="Args to build model for inference (weights will "
"be loaded from --w2v_path)")
|
TensorFlow2/Detection/Efficientdet/scripts/D0 | D0 | training-benchmark-TF32-A100-80G | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=104
ep=1
lr=1.1
wu=25
ema=0.999
momentum=0.93
visible_devices=$(seq -s, 0 $((${NGPU:-8}-1)))
mkdir -p /tmp/training-benchmark-TF32-A100-80G
rm -rf /tmp/training-benchmark-TF32-A100-80G/*
mpirun -np ${NGPU:-8} --allow-run-as-root --bind-to none \
-map-by slot -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-x CUDA_VISIBLE_DEVICES=$visible_devices \
-x TF_GPU_HOST_MEM_LIMIT_IN_MB=131072 \
python3 train.py \
--training_file_pattern=/workspace/coco/train-* \
--val_file_pattern=/workspace/coco/val-* \
--val_json_file=/workspace/coco/annotations/instances_val2017.json \
--model_name=efficientdet-d0 \
--model_dir=/tmp/training-benchmark-TF32-A100-80G \
--backbone_init=/workspace/checkpoints/efficientnet-b0-joc \
--batch_size=$bs \
--num_epochs=$ep \
--use_xla=True \
--amp=False \
--lr=$lr \
--warmup_epochs=$wu \
--benchmark=True \
--benchmark_steps=500 \
--hparams="moving_average_decay=$ema,momentum=$momentum" \
2>&1 | tee /tmp/training-benchmark-TF32-A100-80G/train-benchmark.log |
PyTorch/Classification/GPUNet/triton/065ms/runner | runner | pipeline_impl | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.65ms.json \
--checkpoint ${CHECKPOINT_DIR}/0.65ms.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--val-path ${DATASETS_DIR}/ \
--is-prunet False \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) |
TensorFlow2/LanguageModeling/ELECTRA | ELECTRA | file_utils | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
# from examples import __version__
__version__ = "0.1"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
import tensorflow as tf
assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
default_cache_path = os.path.join(torch_cache_home, "transformers")
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv("PYTORCH_TRANSFORMERS_CACHE", os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path))
)
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv(
"PYTORCH_TRANSFORMERS_CACHE", os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
)
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://d2ws9o8vfrpkyk.cloudfront.net"
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_callable(*docstr):
def docstring_decorator(fn):
class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
note = r"""
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
pre and post processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + "".join(docstr)
return fn
return docstring_decorator
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3")
def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:
endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX
if postfix is None:
return "/".join((endpoint, identifier))
else:
return "/".join((endpoint, identifier, postfix))
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
If the url ends with .h5 (Keras HDF5 weights), '.h5' is appended to the name
so that TF 2.0 can identify it as an HDF5 file
(see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent=None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
force_download: if True, re-download the file even if it's already cached in the cache dir.
resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
file in a folder along the archive.
force_extract: if True when extract_compressed_file is True and the archive was already extracted,
re-extract the archive and override the folder where it was extracted.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url, proxies=None):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file, proxies=None):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += "; torch/{}".format(torch.__version__)
if is_tf_available():
ua += "; tensorflow/{}".format(tf.__version__)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent=None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
etag = None
if not local_files_only:
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url, proxies=proxies)
else:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
return None
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
if resume_download:
logger.warn('Warning: resumable downloads are not implemented for "s3://" urls')
s3_get(url, temp_file, proxies=proxies)
else:
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
logger.info("storing %s in cache at %s", url, cache_path)
os.replace(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
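# Usage sketch (illustration only, not part of the original module); the URL is a
# placeholder, not a real asset:
#
#   local_path = cached_path("https://example.com/some_model/config.json")
#   with open(local_path) as f:
#       config = json.load(f)
#
# The first call downloads the file (guarded by a .lock file against parallel
# downloads); subsequent calls return the cached copy, keyed by the URL/ETag
# hash computed in `url_to_filename`.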
|
PyTorch/Classification/ConvNets/triton/scripts | scripts | setup_parameters | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export PRECISION="fp16"
export FORMAT="onnx"
export BATCH_SIZE="1,2,4,8,16,32,64,128"
export BACKEND_ACCELERATOR="trt"
export MAX_BATCH_SIZE="128"
export NUMBER_OF_MODEL_INSTANCES="1"
export TRITON_MAX_QUEUE_DELAY="1"
export TRITON_PREFERRED_BATCH_SIZES="64 128"
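# Usage sketch (illustration only): source this file so the exported variables
# are visible to the other deployment helpers, e.g.
#   source triton/scripts/setup_parameters.sh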
|
PyTorch/SpeechSynthesis/FastPitch/scripts | scripts | train | #!/usr/bin/env bash
export OMP_NUM_THREADS=1
: ${NUM_GPUS:=8}
: ${BATCH_SIZE:=16}
: ${GRAD_ACCUMULATION:=2}
: ${OUTPUT_DIR:="./output"}
: ${LOG_FILE:=$OUTPUT_DIR/nvlog.json}
: ${DATASET_PATH:=LJSpeech-1.1}
: ${TRAIN_FILELIST:=filelists/ljs_audio_pitch_text_train_v3.txt}
: ${VAL_FILELIST:=filelists/ljs_audio_pitch_text_val.txt}
: ${AMP:=false}
: ${SEED:=""}
: ${LEARNING_RATE:=0.1}
# Adjust these when the amount of data changes
: ${EPOCHS:=1000}
: ${EPOCHS_PER_CHECKPOINT:=20}
: ${WARMUP_STEPS:=1000}
: ${KL_LOSS_WARMUP:=100}
# Train a mixed phoneme/grapheme model
: ${PHONE:=true}
# Enable energy conditioning
: ${ENERGY:=true}
: ${TEXT_CLEANERS:=english_cleaners_v2}
# Add dummy space prefix/suffix if audio is not precisely trimmed
: ${APPEND_SPACES:=false}
: ${LOAD_PITCH_FROM_DISK:=true}
: ${LOAD_MEL_FROM_DISK:=false}
# For multispeaker models, add speaker ID = {0, 1, ...} as the last filelist column
: ${NSPEAKERS:=1}
: ${SAMPLING_RATE:=22050}
# Adjust env variables to maintain the global batch size: NUM_GPUS x BATCH_SIZE x GRAD_ACCUMULATION = 256.
GBS=$(($NUM_GPUS * $BATCH_SIZE * $GRAD_ACCUMULATION))
[ $GBS -ne 256 ] && echo -e "\nWARNING: Global batch size changed from 256 to ${GBS}."
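# Example (illustration only): to keep the global batch size at 256 on 4 GPUs,
# run e.g. NUM_GPUS=4 BATCH_SIZE=16 GRAD_ACCUMULATION=4 bash scripts/train.sh
# (4 x 16 x 4 = 256); with fewer GPUs, raise GRAD_ACCUMULATION accordingly.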
echo -e "\nAMP=$AMP, ${NUM_GPUS}x${BATCH_SIZE}x${GRAD_ACCUMULATION}" \
"(global batch size ${GBS})\n"
# ARGS=""
ARGS+=" --cuda"
ARGS+=" -o $OUTPUT_DIR"
ARGS+=" --log-file $LOG_FILE"
ARGS+=" --dataset-path $DATASET_PATH"
ARGS+=" --training-files $TRAIN_FILELIST"
ARGS+=" --validation-files $VAL_FILELIST"
ARGS+=" -bs $BATCH_SIZE"
ARGS+=" --grad-accumulation $GRAD_ACCUMULATION"
ARGS+=" --optimizer lamb"
ARGS+=" --epochs $EPOCHS"
ARGS+=" --epochs-per-checkpoint $EPOCHS_PER_CHECKPOINT"
ARGS+=" --warmup-steps $WARMUP_STEPS"
ARGS+=" -lr $LEARNING_RATE"
ARGS+=" --weight-decay 1e-6"
ARGS+=" --grad-clip-thresh 1000.0"
ARGS+=" --dur-predictor-loss-scale 0.1"
ARGS+=" --pitch-predictor-loss-scale 0.1"
ARGS+=" --trainloader-repeats 100"
ARGS+=" --validation-freq 10"
# Autoalign & new features
ARGS+=" --kl-loss-start-epoch 0"
ARGS+=" --kl-loss-warmup-epochs $KL_LOSS_WARMUP"
ARGS+=" --text-cleaners $TEXT_CLEANERS"
ARGS+=" --n-speakers $NSPEAKERS"
[ "$AMP" = "true" ] && ARGS+=" --amp"
[ "$PHONE" = "true" ] && ARGS+=" --p-arpabet 1.0"
[ "$ENERGY" = "true" ] && ARGS+=" --energy-conditioning"
[ "$SEED" != "" ] && ARGS+=" --seed $SEED"
[ "$LOAD_MEL_FROM_DISK" = true ] && ARGS+=" --load-mel-from-disk"
[ "$LOAD_PITCH_FROM_DISK" = true ] && ARGS+=" --load-pitch-from-disk"
[ "$PITCH_ONLINE_DIR" != "" ] && ARGS+=" --pitch-online-dir $PITCH_ONLINE_DIR" # e.g., /dev/shm/pitch
[ "$PITCH_ONLINE_METHOD" != "" ] && ARGS+=" --pitch-online-method $PITCH_ONLINE_METHOD"
[ "$APPEND_SPACES" = true ] && ARGS+=" --prepend-space-to-text"
[ "$APPEND_SPACES" = true ] && ARGS+=" --append-space-to-text"
[[ "$ARGS" != *"--checkpoint-path"* ]] && ARGS+=" --resume"
if [ "$SAMPLING_RATE" == "44100" ]; then
ARGS+=" --sampling-rate 44100"
ARGS+=" --filter-length 2048"
ARGS+=" --hop-length 512"
ARGS+=" --win-length 2048"
ARGS+=" --mel-fmin 0.0"
ARGS+=" --mel-fmax 22050.0"
elif [ "$SAMPLING_RATE" != "22050" ]; then
echo "Unknown sampling rate $SAMPLING_RATE"
exit 1
fi
mkdir -p "$OUTPUT_DIR"
: ${DISTRIBUTED:="-m torch.distributed.launch --nproc_per_node $NUM_GPUS"}
python $DISTRIBUTED train.py $ARGS "$@"
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms | transforms | one_hot_encoding | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from syngen.generator.tabular.transforms.base_transform import BaseTransform
class OneHotEncoding(BaseTransform):
"""OneHotEncoding for categorical data.
Adapted from: https://github.com/sdv-dev/CTGAN
This transformer replaces a single vector with N unique categories in it
with N vectors which have 1s on the rows where the corresponding category
is found and 0s on the rest.
Null values are considered just another category.
Args:
error_on_unknown (bool):
If a value that was not seen during the fit stage is passed to
transform, then an error will be raised if this is True.
"""
dummies = None
_dummy_na = None
_num_dummies = None
_dummy_encoded = False
_indexer = None
_uniques = None
def __init__(self, error_on_unknown=True):
self.error_on_unknown = error_on_unknown
@staticmethod
def _prepare_data(data):
"""Convert data to appropriate format.
If data is a valid list or a list of lists,
transforms it into an np.array, otherwise returns it.
Args:
data (pandas.Series, numpy.ndarray, list or list of lists):
Data to prepare.
Returns:
pandas.Series or numpy.ndarray
"""
if isinstance(data, list):
data = np.array(data)
if len(data.shape) > 2:
raise ValueError("Unexpected format.")
if len(data.shape) == 2:
if data.shape[1] != 1:
raise ValueError("Unexpected format.")
data = data[:, 0]
return data
def _transform(self, data):
if self._dummy_encoded:
coder = self._indexer
codes = pd.Categorical(data, categories=self._uniques).codes
else:
coder = self._uniques
codes = data
rows = len(data)
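# Broadcast the category list across all rows and compare it against the
# (possibly integer-encoded) values; the element-wise equality yields the one-hot matrix.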
dummies = np.broadcast_to(coder, (rows, self._num_dummies))
coded = np.broadcast_to(codes, (self._num_dummies, rows)).T
array = (coded == dummies).astype(int)
if self._dummy_na:
null = np.zeros((rows, 1), dtype=int)
null[pd.isnull(data)] = 1
array = np.append(array, null, axis=1)
return array
def fit(self, data):
"""Fit the transformer to the data.
Get the pandas `dummies` which will be used later on for OneHotEncoding.
Args:
data (pandas.Series, numpy.ndarray, list or list of lists):
Data to fit the transformer to.
"""
data = self._prepare_data(data)
null = pd.isnull(data)
self._uniques = list(pd.unique(data[~null]))
self._dummy_na = null.any()
self._num_dummies = len(self._uniques)
self._indexer = list(range(self._num_dummies))
self.dummies = self._uniques.copy()
if not np.issubdtype(data.dtype, np.number):
self._dummy_encoded = True
if self._dummy_na:
self.dummies.append(np.nan)
def transform(self, data):
"""Replace each category with the OneHot vectors.
Args:
data (pandas.Series, numpy.ndarray, list or list of lists):
Data to transform.
Returns:
numpy.ndarray:
"""
data = self._prepare_data(data)
array = self._transform(data)
if self.error_on_unknown:
unknown = array.sum(axis=1) == 0
if unknown.any():
raise ValueError(
f"Attempted to transform {list(data[unknown])} ",
"that were not seen during fit stage.",
)
return array
def inverse_transform(self, data):
"""Convert float values back to the original categorical values.
Args:
data (numpy.ndarray):
Data to revert.
Returns:
pandas.Series
"""
if data.ndim == 1:
data = data.reshape(-1, 1)
indices = np.argmax(data, axis=1)
return pd.Series(indices).map(self.dummies.__getitem__)
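# A minimal usage sketch (illustrative only; the sample values below are made up):
#
#   encoder = OneHotEncoding()
#   values = pd.Series(["red", "green", "red", None])
#   encoder.fit(values)
#   one_hot = encoder.transform(values)            # shape (4, 3): "red", "green", plus a null column
#   restored = encoder.inverse_transform(one_hot)  # maps rows back to "red" / "green" / NaN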
|
CUDA-Optimized/FastSpeech/scripts | scripts | install | #!/bin/bash
# install ffmpeg
# apt-get update
# apt-get install -y ffmpeg
# install the project and its dependencies
pip install --user --no-cache-dir . |
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | masked_lm_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for masked language model network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import masked_lm
from official.nlp.modeling.networks import transformer_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class MaskedLMTest(keras_parameterized.TestCase):
def create_network(self,
vocab_size,
sequence_length,
hidden_size,
num_predictions,
output='predictions',
xformer_stack=None):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
if xformer_stack is None:
xformer_stack = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
lm_outputs, _ = xformer_stack([word_ids, mask, type_ids])
# Create a maskedLM from the transformer stack.
test_network = masked_lm.MaskedLM(
num_predictions=num_predictions,
input_width=lm_outputs.shape[-1],
source_network=xformer_stack,
output=output)
return test_network
def test_network_creation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Make sure that the output tensor of the masked LM is the right shape.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
expected_output_shape = [None, num_predictions, vocab_size]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_network_invocation_with_internal_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
logits_model = tf.keras.Model(test_network.inputs, test_network.logits)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
outputs = model.predict([lm_input_data, masked_position_data])
logits = logits_model.predict([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_network_invocation_with_external_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
xformer_stack = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions,
xformer_stack=xformer_stack,
output='predictions')
logit_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions,
xformer_stack=xformer_stack,
output='logits')
logit_network.set_weights(test_network.get_weights())
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
logit_output = logit_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
logits_model = tf.keras.Model(([lm_input_tensor, masked_lm_positions]),
logit_output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
outputs = model.predict([lm_input_data, masked_position_data])
logits = logits_model.predict([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_network_invocation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
_ = model.predict([lm_input_data, masked_position_data])
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = self.create_network(
vocab_size=8,
sequence_length=8,
hidden_size=8,
num_predictions=8,
output='bad')
if __name__ == '__main__':
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf | conf | train_config | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The order in this list matters a lot! An element in this list can only be modified by a subsequent one!
defaults:
- model: ???
- dataset: electricity
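# Selects the evaluator matching the chosen trainer: xgbtrainer -> xgbevaluator, stattrainer -> statevaluator, otherwise (default ctltrainer) -> ctlevaluator.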
- evaluator: ${if:${cmp:${oc.select:trainer, ctltrainer}, xgbtrainer}, xgbevaluator, ${if:${cmp:${oc.select:trainer, ctltrainer}, stattrainer}, statevaluator, ctlevaluator}}
- optional model_dataset@_global_: ${model}_${dataset}
- train_derived_fields
- _self_
seed: 1
|
TensorFlow/Detection/SSD/models/research/object_detection/dockerfiles/android | android | README | # Dockerfile for the TPU and TensorFlow Lite Object Detection tutorial
This Docker image automates the setup involved with training
object detection models on Google Cloud and building the Android TensorFlow Lite
demo app. We recommend using this container if you decide to work through our
tutorial on ["Training and serving a real-time mobile object detector in
30 minutes with Cloud TPUs"](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193), though of course it may be useful even if you would
like to use the Object Detection API outside the context of the tutorial.
A couple words of warning:
1. Docker containers do not have persistent storage. This means that any changes
you make to files inside the container will not persist if you restart
the container. When running through the tutorial,
**do not close the container**.
2. To be able to deploy the [Android app](
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/examples/android/app)
(which you will build at the end of the tutorial),
you will need to kill any instances of `adb` running on the host machine. You
can accomplish this by closing all instances of Android Studio, and then
running `adb kill-server`.
You can install Docker by following the [instructions here](
https://docs.docker.com/install/).
## Running The Container
From this directory, build the Dockerfile as follows (this takes a while):
```
docker build --tag detect-tf .
```
Run the container:
```
docker run --rm -it --privileged -p 6006:6006 detect-tf
```
When running the container, you will find yourself inside the `/tensorflow`
directory, which is the path to the TensorFlow [source
tree](https://github.com/tensorflow/tensorflow).
## Text Editing
The tutorial also
requires you to occasionally edit files inside the source tree.
This Docker image comes with `vim`, `nano`, and `emacs` preinstalled for your
convenience.
## What's In This Container
This container is derived from the nightly build of TensorFlow, and contains the
sources for TensorFlow at `/tensorflow`, as well as the
[TensorFlow Models](https://github.com/tensorflow/models) which are available at
`/tensorflow/models` (and contain the Object Detection API as a subdirectory
at `/tensorflow/models/research/object_detection`).
The Oxford-IIIT Pets dataset, the COCO pre-trained SSD + MobileNet (v1)
checkpoint, and an example
trained model are all available in `/tmp` in their respective folders.
This container also has the `gsutil` and `gcloud` utilities, the `bazel` build
tool, and all dependencies necessary to use the Object Detection API and to
compile and install the TensorFlow Lite Android demo app.
At various points throughout the tutorial, you may see references to the
*research directory*. This refers to the `research` folder within the
models repository, located at
`/tensorflow/models/research`.
|
PyTorch/Classification/GPUNet/triton | triton | run_performance_on_fw | #!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To run inference with the model on the framework runtime, you can use the `run_performance_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and calculates throughput and latency.
The results are stored at the path pointed to by `--results-path` in the form of a CSV file.
Example call:
```shell script
python ./triton/run_performance_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-sizes 32 \
--results-path results.csv
```
"""
import argparse
import csv
import logging
import os
from pathlib import Path
from typing import List
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_performance_on_fw")
def _save_result(results_path: str, results: List):
LOGGER.info(f"Storing results to {results_path}")
item = results[0]
with open(results_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=list(item.keys()))
writer.writeheader()
for result in results:
writer.writerow(result)
LOGGER.info("Done")
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(
description="Measure inference performance of given model in framework container", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument(
"--batch-sizes",
type=int,
default=[1],
help="List of batch sizes to test.",
nargs="*",
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of performance iterations per batch size.",
)
parser.add_argument(
"--results-path",
help="Path to results file where performance result will be stored",
required=True,
)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
if args.iterations < 10:
raise ValueError("The minimal number of iterations for performance measurement is 10")
if not args.results_path.endswith(".csv"):
raise ValueError("Results path for results is invalid. Please, provide the CSV file name. Example: results.csv")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
results = []
with runner.init_inference(model=model) as runner_session:
for batch_size in args.batch_sizes:
LOGGER.info(f"Running performance measurement for batch size {batch_size}.")
# WAR - override batch size for dataloader
args.batch_size = batch_size
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.debug("Data loader initialized.")
for _, x, _ in dataloader_fn():
input = x
break
runner_session.start_measurement()
LOGGER.info("Running measurement")
for idx in range(args.iterations):
LOGGER.debug(f"Iteration {idx}")
runner_session(input)
throughput, latency = runner_session.stop_measurement(batch_size=batch_size)
LOGGER.info("Done")
LOGGER.info(f"Throughput: {throughput:.2f} [infer/s]")
LOGGER.info(f"Latency: {latency:.2f} [ms]")
data = {
"Batch": batch_size,
"Throughput (infer/sec)": f"{throughput:.2f}",
"Latency (ms)": f"{latency:.2f}",
}
results.append(data)
if not results:
raise RuntimeError("No valid measurement performed.")
_save_result(args.results_path, results)
if __name__ == "__main__":
main()
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | running_notebook | # Quick Start: Jupyter notebook for off-the-shelf inference
If you'd like to hit the ground running and run detection on a few example
images right out of the box, we recommend trying out the Jupyter notebook demo.
To run the Jupyter notebook, run the following command from
`tensorflow/models/research/object_detection`:
```
# From tensorflow/models/research/object_detection
jupyter notebook
```
The notebook should open in your favorite web browser. Click the
[`object_detection_tutorial.ipynb`](../object_detection_tutorial.ipynb) link to
open the demo.
|
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer | transformer | model_utils_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Transformer model helper methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.nlp.transformer import model_utils
NEG_INF = -1e9
class ModelUtilsTest(tf.test.TestCase):
def test_get_padding(self):
x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])
padding = model_utils.get_padding(x, padding_value=0)
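# get_padding marks positions equal to padding_value with 1 and real tokens with 0.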
self.assertAllEqual([[0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [1, 0, 0, 1, 0]],
padding)
def test_get_padding_bias(self):
x = tf.constant([[1, 0, 0, 0, 2], [3, 4, 0, 0, 0], [0, 5, 6, 0, 7]])
bias = model_utils.get_padding_bias(x)
bias_shape = tf.shape(bias)
flattened_bias = tf.reshape(bias, [3, 5])
self.assertAllEqual([[0, NEG_INF, NEG_INF, NEG_INF, 0],
[0, 0, NEG_INF, NEG_INF, NEG_INF],
[NEG_INF, 0, 0, NEG_INF, 0]],
flattened_bias)
self.assertAllEqual([3, 1, 1, 5], bias_shape)
def test_get_decoder_self_attention_bias(self):
length = 5
bias = model_utils.get_decoder_self_attention_bias(length)
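# The result is a causal mask: position i may attend to positions <= i, while
# future positions receive a NEG_INF bias.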
self.assertAllEqual([[[[0, NEG_INF, NEG_INF, NEG_INF, NEG_INF],
[0, 0, NEG_INF, NEG_INF, NEG_INF],
[0, 0, 0, NEG_INF, NEG_INF],
[0, 0, 0, 0, NEG_INF],
[0, 0, 0, 0, 0]]]],
bias)
if __name__ == "__main__":
assert tf.version.VERSION.startswith('2.')
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | np_box_ops_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_box_ops."""
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_ops
class BoxOpsTests(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxes1 = boxes1
self.boxes2 = boxes2
def testArea(self):
areas = np_box_ops.area(self.boxes1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_box_ops.intersection(self.boxes1, self.boxes2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_box_ops.iou(self.boxes1, self.boxes2)
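# For example, boxes1[0] has area 3*2 = 6, boxes2[0] has area 3*4 = 12, and they
# intersect in a 2*1 = 2 region, so IoU = 2 / (6 + 12 - 2) = 2/16.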
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
boxes1 = np.array([[0.25, 0.25, 0.75, 0.75],
[0.0, 0.0, 0.5, 0.75]],
dtype=np.float32)
boxes2 = np.array([[0.5, 0.25, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]],
dtype=np.float32)
ioa21 = np_box_ops.ioa(boxes2, boxes1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/FastPitch/phrases | phrases | phrase_8_64 | She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
|
TensorFlow/Detection/SSD/models/research/object_detection/predictors | predictors | convolutional_box_predictor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional Box Predictors with and without weight sharing."""
import functools
import tensorflow as tf
from object_detection.core import box_predictor
from object_detection.utils import static_shape
slim = tf.contrib.slim
BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS
class _NoopVariableScope(object):
"""A dummy class that does not push any scope."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
class ConvolutionalBoxPredictor(box_predictor.BoxPredictor):
"""Convolutional Box Predictor.
Optionally add an intermediate 1x1 convolutional layer after features and
predict in parallel branches box_encodings and
class_predictions_with_background.
Currently this box predictor assumes that predictions are "shared" across
classes --- that is each anchor makes box predictions which do not depend
on class.
"""
def __init__(self,
is_training,
num_classes,
box_prediction_head,
class_prediction_head,
other_heads,
conv_hyperparams_fn,
num_layers_before_predictor,
min_depth,
max_depth):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
box_prediction_head: The head that predicts the boxes.
class_prediction_head: The head that predicts the classes.
other_heads: A dictionary mapping head names to convolutional
head classes.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
Raises:
ValueError: if min_depth > max_depth.
"""
super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)
self._box_prediction_head = box_prediction_head
self._class_prediction_head = class_prediction_head
self._other_heads = other_heads
self._conv_hyperparams_fn = conv_hyperparams_fn
self._min_depth = min_depth
self._max_depth = max_depth
self._num_layers_before_predictor = num_layers_before_predictor
@property
def num_classes(self):
return self._num_classes
def _predict(self, image_features, num_predictions_per_location_list):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
predictions = {
BOX_ENCODINGS: [],
CLASS_PREDICTIONS_WITH_BACKGROUND: [],
}
for head_name in self._other_heads.keys():
predictions[head_name] = []
# TODO(rathodv): Come up with a better way to generate scope names
# in box predictor once we have time to retrain all models in the zoo.
# The following lines create scope names to be backwards compatible with the
# existing checkpoints.
box_predictor_scopes = [_NoopVariableScope()]
if len(image_features) > 1:
box_predictor_scopes = [
tf.variable_scope('BoxPredictor_{}'.format(i))
for i in range(len(image_features))
]
for (image_feature,
num_predictions_per_location, box_predictor_scope) in zip(
image_features, num_predictions_per_location_list,
box_predictor_scopes):
net = image_feature
with box_predictor_scope:
with slim.arg_scope(self._conv_hyperparams_fn()):
with slim.arg_scope([slim.dropout], is_training=self._is_training):
# Add additional conv layers before the class predictor.
features_depth = static_shape.get_depth(image_feature.get_shape())
depth = max(min(features_depth, self._max_depth), self._min_depth)
tf.logging.info('depth of additional conv before box predictor: {}'.
format(depth))
if depth > 0 and self._num_layers_before_predictor > 0:
for i in range(self._num_layers_before_predictor):
net = slim.conv2d(
net,
depth, [1, 1],
reuse=tf.AUTO_REUSE,
scope='Conv2d_%d_1x1_%d' % (i, depth))
sorted_keys = sorted(self._other_heads.keys())
sorted_keys.append(BOX_ENCODINGS)
sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND)
for head_name in sorted_keys:
if head_name == BOX_ENCODINGS:
head_obj = self._box_prediction_head
elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
head_obj = self._class_prediction_head
else:
head_obj = self._other_heads[head_name]
prediction = head_obj.predict(
features=net,
num_predictions_per_location=num_predictions_per_location)
predictions[head_name].append(prediction)
return predictions
# TODO(rathodv): Replace with slim.arg_scope_func_key once its available
# externally.
def _arg_scope_func_key(op):
"""Returns a key that can be used to index arg_scope dictionary."""
return getattr(op, '_key_op', str(op))
# TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above
# since they are very similar.
class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor):
"""Convolutional Box Predictor with weight sharing.
Defines the box predictor as described in
https://arxiv.org/abs/1708.02002. This class differs from
ConvolutionalBoxPredictor in that it shares weights and biases while
predicting from different feature maps. However, batch_norm parameters are not
shared because the statistics of the activations vary among the different
feature maps.
Also note that separate multi-layer towers are constructed for the box
encoding and class predictors respectively.
"""
def __init__(self,
is_training,
num_classes,
box_prediction_head,
class_prediction_head,
other_heads,
conv_hyperparams_fn,
depth,
num_layers_before_predictor,
kernel_size=3,
apply_batch_norm=False,
share_prediction_tower=False,
use_depthwise=False):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
box_prediction_head: The head that predicts the boxes.
class_prediction_head: The head that predicts the classes.
other_heads: A dictionary mapping head names to convolutional
head classes.
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
kernel_size: Size of final convolution kernel.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
use_depthwise: Whether to use depthwise separable conv2d instead of
regular conv2d.
"""
super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training,
num_classes)
self._box_prediction_head = box_prediction_head
self._class_prediction_head = class_prediction_head
self._other_heads = other_heads
self._conv_hyperparams_fn = conv_hyperparams_fn
self._depth = depth
self._num_layers_before_predictor = num_layers_before_predictor
self._kernel_size = kernel_size
self._apply_batch_norm = apply_batch_norm
self._share_prediction_tower = share_prediction_tower
self._use_depthwise = use_depthwise
@property
def num_classes(self):
return self._num_classes
def _insert_additional_projection_layer(self, image_feature,
inserted_layer_counter,
target_channel):
if inserted_layer_counter < 0:
return image_feature, inserted_layer_counter
image_feature = slim.conv2d(
image_feature,
target_channel, [1, 1],
stride=1,
padding='SAME',
activation_fn=None,
normalizer_fn=(tf.identity if self._apply_batch_norm else None),
scope='ProjectionLayer/conv2d_{}'.format(
inserted_layer_counter))
if self._apply_batch_norm:
image_feature = slim.batch_norm(
image_feature,
scope='ProjectionLayer/conv2d_{}/BatchNorm'.format(
inserted_layer_counter))
inserted_layer_counter += 1
return image_feature, inserted_layer_counter
def _compute_base_tower(self, tower_name_scope, image_feature, feature_index,
has_different_feature_channels, target_channel,
inserted_layer_counter):
net = image_feature
for i in range(self._num_layers_before_predictor):
if self._use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
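# When batch norm is enabled, tf.identity is passed as normalizer_fn so that
# slim omits the conv bias; the real batch norm (with per-feature-map
# statistics) is applied separately below.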
net = conv_op(
net,
self._depth, [self._kernel_size, self._kernel_size],
stride=1,
padding='SAME',
activation_fn=None,
normalizer_fn=(tf.identity if self._apply_batch_norm else None),
scope='{}/conv2d_{}'.format(tower_name_scope, i))
if self._apply_batch_norm:
net = slim.batch_norm(
net,
scope='{}/conv2d_{}/BatchNorm/feature_{}'.
format(tower_name_scope, i, feature_index))
net = tf.nn.relu6(net)
return net
def _predict_head(self, head_name, head_obj, image_feature, box_tower_feature,
feature_index, has_different_feature_channels,
target_channel, inserted_layer_counter,
num_predictions_per_location):
if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
tower_name_scope = 'ClassPredictionTower'
else:
raise ValueError('Unknown head')
if self._share_prediction_tower:
head_tower_feature = box_tower_feature
else:
head_tower_feature = self._compute_base_tower(
tower_name_scope=tower_name_scope,
image_feature=image_feature,
feature_index=feature_index,
has_different_feature_channels=has_different_feature_channels,
target_channel=target_channel,
inserted_layer_counter=inserted_layer_counter)
return head_obj.predict(
features=head_tower_feature,
num_predictions_per_location=num_predictions_per_location)
def _predict(self, image_features, num_predictions_per_location_list):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels] containing features for a batch of images. Note that
when not all tensors in the list have the same number of channels, an
additional projection layer will be added on top of the tensor to generate
a feature map with a number of channels consistent with the majority.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map. Note that all values must be the same since the weights are
shared.
Returns:
A dictionary containing:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, code_size] representing the location of
the objects. Each entry in the list corresponds to a feature map in
the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
(optional) mask_predictions: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes, mask_height, mask_width].
Raises:
ValueError: If the image feature maps do not have the same number of
channels or if the number of predictions per location differs between the
feature maps.
"""
if len(set(num_predictions_per_location_list)) > 1:
raise ValueError('num predictions per location must be the same for all '
'feature maps, found: {}'.format(
num_predictions_per_location_list))
feature_channels = [
image_feature.shape[3].value for image_feature in image_features
]
has_different_feature_channels = len(set(feature_channels)) > 1
if has_different_feature_channels:
inserted_layer_counter = 0
target_channel = max(set(feature_channels), key=feature_channels.count)
tf.logging.info('Not all feature maps have the same number of '
'channels, found: {}, adding projection layers '
'to bring all feature maps to uniform channels '
'of {}'.format(feature_channels, target_channel))
else:
# Placeholder values used when has_different_feature_channels is False.
target_channel = -1
inserted_layer_counter = -1
predictions = {
BOX_ENCODINGS: [],
CLASS_PREDICTIONS_WITH_BACKGROUND: [],
}
for head_name in self._other_heads.keys():
predictions[head_name] = []
for feature_index, (image_feature,
num_predictions_per_location) in enumerate(
zip(image_features,
num_predictions_per_location_list)):
with tf.variable_scope('WeightSharedConvolutionalBoxPredictor',
reuse=tf.AUTO_REUSE):
with slim.arg_scope(self._conv_hyperparams_fn()):
(image_feature,
inserted_layer_counter) = self._insert_additional_projection_layer(
image_feature, inserted_layer_counter, target_channel)
if self._share_prediction_tower:
box_tower_scope = 'PredictionTower'
else:
box_tower_scope = 'BoxPredictionTower'
box_tower_feature = self._compute_base_tower(
tower_name_scope=box_tower_scope,
image_feature=image_feature,
feature_index=feature_index,
has_different_feature_channels=has_different_feature_channels,
target_channel=target_channel,
inserted_layer_counter=inserted_layer_counter)
box_encodings = self._box_prediction_head.predict(
features=box_tower_feature,
num_predictions_per_location=num_predictions_per_location)
predictions[BOX_ENCODINGS].append(box_encodings)
sorted_keys = sorted(self._other_heads.keys())
sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND)
for head_name in sorted_keys:
if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
head_obj = self._class_prediction_head
else:
head_obj = self._other_heads[head_name]
prediction = self._predict_head(
head_name=head_name,
head_obj=head_obj,
image_feature=image_feature,
box_tower_feature=box_tower_feature,
feature_index=feature_index,
has_different_feature_channels=has_different_feature_channels,
target_channel=target_channel,
inserted_layer_counter=inserted_layer_counter,
num_predictions_per_location=num_predictions_per_location)
predictions[head_name].append(prediction)
return predictions
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | hyperparams_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests object_detection.core.hyperparams_builder."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
slim = tf.contrib.slim
def _get_scope_key(op):
return getattr(op, '_key_op', str(op))
class HyperparamsBuilderTest(tf.test.TestCase):
def test_default_arg_scope_has_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertTrue(_get_scope_key(slim.conv2d) in scope)
def test_default_arg_scope_has_separable_conv2d_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertTrue(_get_scope_key(slim.separable_conv2d) in scope)
def test_default_arg_scope_has_conv2d_transpose_op(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertTrue(_get_scope_key(slim.conv2d_transpose) in scope)
def test_explicit_fc_op_arg_scope_has_fully_connected_op(self):
conv_hyperparams_text_proto = """
op: FC
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
self.assertTrue(_get_scope_key(slim.fully_connected) in scope)
def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
kwargs_1, kwargs_2, kwargs_3 = scope.values()
self.assertDictEqual(kwargs_1, kwargs_2)
self.assertDictEqual(kwargs_1, kwargs_3)
def test_return_l1_regularized_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope.values()[0]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l1_regularized_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l1_regularizer {
weight: 0.5
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.abs(weights).sum() * 0.5, result)
def test_return_l2_regularizer_weights(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
regularizer = conv_scope_arguments['weights_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_l2_regularizer_weights_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
weight: 0.42
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
regularizer = keras_config.params()['kernel_regularizer']
weights = np.array([1., -1, 4., 2.])
with self.test_session() as sess:
result = sess.run(regularizer(tf.constant(weights)))
self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result)
def test_return_non_default_batch_norm_params_with_train_during_train(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertTrue(batch_norm_params['is_training'])
def test_return_non_default_batch_norm_params_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params()
self.assertAlmostEqual(batch_norm_params['momentum'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
batch_norm_layer = keras_config.build_batch_norm()
self.assertTrue(isinstance(batch_norm_layer,
freezable_batch_norm.FreezableBatchNorm))
def test_return_non_default_batch_norm_params_keras_override(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertTrue(keras_config.use_batch_norm())
batch_norm_params = keras_config.batch_norm_params(momentum=0.4)
self.assertAlmostEqual(batch_norm_params['momentum'], 0.4)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
def test_return_batch_norm_params_with_notrain_during_eval(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: true
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=False)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_return_batch_norm_params_with_notrain_when_train_is_false(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
decay: 0.7
center: false
scale: true
epsilon: 0.03
train: false
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm)
batch_norm_params = scope[_get_scope_key(slim.batch_norm)]
self.assertAlmostEqual(batch_norm_params['decay'], 0.7)
self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03)
self.assertFalse(batch_norm_params['center'])
self.assertTrue(batch_norm_params['scale'])
self.assertFalse(batch_norm_params['is_training'])
def test_do_not_use_batch_norm_if_default(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['normalizer_fn'], None)
def test_do_not_use_batch_norm_if_default_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertFalse(keras_config.use_batch_norm())
self.assertEqual(keras_config.batch_norm_params(), {})
# The batch norm builder should build an identity Lambda layer
identity_layer = keras_config.build_batch_norm()
self.assertTrue(isinstance(identity_layer,
tf.keras.layers.Lambda))
def test_use_none_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], None)
def test_use_none_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: NONE
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertEqual(keras_config.params()['activation'], None)
self.assertEqual(
keras_config.params(include_activation=True)['activation'], None)
activation_layer = keras_config.build_activation_layer()
self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda))
self.assertEqual(activation_layer.function, tf.identity)
def test_use_relu_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu)
def test_use_relu_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertEqual(keras_config.params()['activation'], None)
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu)
activation_layer = keras_config.build_activation_layer()
self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda))
self.assertEqual(activation_layer.function, tf.nn.relu)
def test_use_relu_6_activation(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6)
def test_use_relu_6_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
self.assertEqual(keras_config.params()['activation'], None)
self.assertEqual(
keras_config.params(include_activation=True)['activation'], tf.nn.relu6)
activation_layer = keras_config.build_activation_layer()
self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda))
self.assertEqual(activation_layer.function, tf.nn.relu6)
def test_override_activation_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
activation: RELU_6
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
new_params = keras_config.params(activation=tf.nn.relu)
self.assertEqual(new_params['activation'], tf.nn.relu)
def _assert_variance_in_range(self, initializer, shape, variance,
tol=1e-2):
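    # Sample a variable from the initializer and check that its empirical
    # variance is close to the expected value (within `tol`).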
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(
name='test',
shape=shape,
dtype=tf.float32,
initializer=initializer)
sess.run(tf.global_variables_initializer())
values = sess.run(var)
self.assertAllClose(np.var(values), variance, tol, tol)
def test_variance_in_range_with_variance_scaling_initializer_fan_in(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_OUT
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 40.)
def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_AVG
uniform: false
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=4. / (100. + 40.))
def test_variance_in_range_with_variance_scaling_initializer_uniform(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_variance_scaling_initializer_uniform_keras(
self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
variance_scaling_initializer {
factor: 2.0
mode: FAN_IN
uniform: true
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=2. / 100.)
def test_variance_in_range_with_truncated_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_truncated_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.49, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
scope_fn = hyperparams_builder.build(conv_hyperparams_proto,
is_training=True)
scope = scope_fn()
conv_scope_arguments = scope[_get_scope_key(slim.conv2d)]
initializer = conv_scope_arguments['weights_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
def test_variance_in_range_with_random_normal_initializer_keras(self):
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.8
}
}
"""
conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
keras_config = hyperparams_builder.KerasLayerHyperparams(
conv_hyperparams_proto)
initializer = keras_config.params()['kernel_initializer']
self._assert_variance_in_range(initializer, shape=[100, 40],
variance=0.64, tol=1e-1)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Classification/GPUNet/triton/runner | runner | task | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import PerformanceTool
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
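        # The first line of the CSV output is the header row; the second line
        # describes the first visible GPU.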
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
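        # `--query-gpu` does not report the CUDA version, so parse it from the
        # full `nvidia-smi --query` output instead.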
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
            size: RAM memory size in bytes
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
            name: Name of checkpoint
            url: URL from which the checkpoint can be downloaded
            path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
batching: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
performance_tool: PerformanceTool
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
ensemble_model_name: Optional[str],
framework: str,
batching: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
measurement_steps_offline: int = 8,
measurement_steps_online: int = 32,
performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER,
):
"""
Args:
            model_name: Name of model
            ensemble_model_name: Name of ensemble model (if any)
            framework: Model framework
            batching: Batching mode used for the deployed model
            container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
            datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operation library path
triton_load_model_method: Method how models are loaded on Triton
measurement_steps_offline: Number of measurement steps in offline performance stage
measurement_steps_online: Number of measurement steps in online performance stage
performance_tool: Performance Tool used for generating results
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.ensemble_model_name = ensemble_model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
self.datasets_dir = pathlib.Path(datasets_dir)
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.measurement_steps_offline = measurement_steps_offline
self.measurement_steps_online = measurement_steps_online
self.logs_dir = pathlib.Path("/var/logs")
self.batching = batching
self.performance_tool = performance_tool
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
|
PyTorch/Classification/ConvNets/image_classification/models | models | resnet | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from collections import OrderedDict
from dataclasses import dataclass
from typing import List, Dict, Callable, Any, Type
import torch
import torch.nn as nn
from .common import (
SqueezeAndExcitation,
LayerBuilder,
SqueezeAndExcitationTRT,
)
from .model import (
Model,
ModelParams,
ModelArch,
EntryPoint,
)
__all__ = ["ResNet", "resnet_configs"]
# BasicBlock {{{
class BasicBlock(nn.Module):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(BasicBlock, self).__init__()
self.conv1 = builder.conv3x3(inplanes, planes, stride, groups=cardinality)
self.bn1 = builder.batchnorm(planes)
self.relu = builder.activation()
self.conv2 = builder.conv3x3(
planes, planes * expansion, groups=cardinality
)
self.bn2 = builder.batchnorm(planes * expansion, zero_init=last_bn_0_init)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.bn1 is not None:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.bn2 is not None:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# BasicBlock }}}
# Bottleneck {{{
class Bottleneck(nn.Module):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
se=False,
se_squeeze=16,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(Bottleneck, self).__init__()
self.conv1 = builder.conv1x1(inplanes, planes)
self.bn1 = builder.batchnorm(planes)
self.conv2 = builder.conv3x3(planes, planes, groups=cardinality, stride=stride)
self.bn2 = builder.batchnorm(planes)
self.conv3 = builder.conv1x1(planes, planes * expansion)
self.bn3 = builder.batchnorm(planes * expansion, zero_init=last_bn_0_init)
self.relu = builder.activation()
self.downsample = downsample
self.stride = stride
self.fused_se = fused_se
if se:
self.squeeze = (
SqueezeAndExcitation(
planes * expansion, se_squeeze, builder.activation()
)
if not trt
else SqueezeAndExcitationTRT(
planes * expansion, se_squeeze, builder.activation()
)
)
else:
self.squeeze = None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
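        # Add the residual, gating `out` with the squeeze-and-excitation output
        # when present; the fused path folds the multiply-add into one addcmul.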
if self.squeeze is None:
out += residual
else:
if self.fused_se:
out = torch.addcmul(residual, out, self.squeeze(out), value=1)
else:
out = residual + out * self.squeeze(out)
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
def __init__(
self,
builder,
inplanes,
planes,
expansion,
stride=1,
cardinality=1,
downsample=None,
fused_se=True,
last_bn_0_init=False,
trt=False,
):
super(SEBottleneck, self).__init__(
builder,
inplanes,
planes,
expansion,
stride=stride,
cardinality=cardinality,
se=True,
se_squeeze=16,
downsample=downsample,
fused_se=fused_se,
last_bn_0_init=last_bn_0_init,
trt=trt,
)
# Bottleneck }}}
class ResNet(nn.Module):
@dataclass
class Arch(ModelArch):
block: Type[Bottleneck]
layers: List[int] # arch
widths: List[int] # arch
expansion: int
cardinality: int = 1
stem_width: int = 64
activation: str = "relu"
default_image_size: int = 224
@dataclass
class Params(ModelParams):
num_classes: int = 1000
last_bn_0_init: bool = False
conv_init: str = "fan_in"
trt: bool = False
fused_se: bool = True
def parser(self, name):
p = super().parser(name)
p.add_argument(
"--num_classes",
metavar="N",
default=self.num_classes,
type=int,
help="number of classes",
)
p.add_argument(
"--last_bn_0_init",
metavar="True|False",
default=self.last_bn_0_init,
type=bool,
)
p.add_argument(
"--conv_init",
default=self.conv_init,
choices=["fan_in", "fan_out"],
type=str,
help="initialization mode for convolutional layers, see https://pytorch.org/docs/stable/nn.init.html#torch.nn.init.kaiming_normal_",
)
p.add_argument("--trt", metavar="True|False", default=self.trt, type=bool)
p.add_argument(
"--fused_se", metavar="True|False", default=self.fused_se, type=bool
)
return p
def __init__(
self,
arch: Arch,
num_classes: int = 1000,
last_bn_0_init: bool = False,
conv_init: str = "fan_in",
trt: bool = False,
fused_se: bool = True,
):
super(ResNet, self).__init__()
self.arch = arch
self.builder = LayerBuilder(
LayerBuilder.Config(activation=arch.activation, conv_init=conv_init)
)
self.last_bn_0_init = last_bn_0_init
self.conv1 = self.builder.conv7x7(3, arch.stem_width, stride=2)
self.bn1 = self.builder.batchnorm(arch.stem_width)
self.relu = self.builder.activation()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
inplanes = arch.stem_width
assert len(arch.widths) == len(arch.layers)
self.num_layers = len(arch.widths)
layers = []
for i, (w, l) in enumerate(zip(arch.widths, arch.layers)):
layer, inplanes = self._make_layer(
arch.block,
arch.expansion,
inplanes,
w,
l,
cardinality=arch.cardinality,
stride=1 if i == 0 else 2,
trt=trt,
fused_se=fused_se,
)
layers.append(layer)
self.layers = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(arch.widths[-1] * arch.expansion, num_classes)
def stem(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
return x
def classifier(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layers(x)
x = self.classifier(x)
return x
def extract_features(self, x, layers=None):
if layers is None:
layers = [f"layer{i+1}" for i in range(self.num_layers)] + ["classifier"]
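        # Run only the stages required to produce the requested outputs.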
run = [
i
for i in range(self.num_layers)
if "classifier" in layers
or any([f"layer{j+1}" in layers for j in range(i, self.num_layers)])
]
output = {}
x = self.stem(x)
for l in run:
fn = self.layers[l]
x = fn(x)
if f"layer{l+1}" in layers:
output[f"layer{l+1}"] = x
if "classifier" in layers:
output["classifier"] = self.classifier(x)
return output
# helper functions {{{
def _make_layer(
self,
block,
expansion,
inplanes,
planes,
blocks,
stride=1,
cardinality=1,
trt=False,
fused_se=True,
):
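        # A projection shortcut is needed whenever the first block changes the
        # spatial resolution (stride != 1) or the number of channels.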
downsample = None
if stride != 1 or inplanes != planes * expansion:
dconv = self.builder.conv1x1(inplanes, planes * expansion, stride=stride)
dbn = self.builder.batchnorm(planes * expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
for i in range(blocks):
layers.append(
block(
self.builder,
inplanes,
planes,
expansion,
stride=stride if i == 0 else 1,
cardinality=cardinality,
downsample=downsample if i == 0 else None,
fused_se=fused_se,
last_bn_0_init=self.last_bn_0_init,
trt=trt,
)
)
inplanes = planes * expansion
return nn.Sequential(*layers), inplanes
def ngc_checkpoint_remap(self, url=None, version=None):
if version is None:
version = url.split("/")[8]
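        # Checkpoints published as version 20.06.0 store residual stages under
        # `layerN.*`; remap them to the `layers.{N-1}.*` names used here.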
def to_sequential_remap(s):
splited = s.split(".")
if splited[0].startswith("layer"):
return ".".join(
["layers." + str(int(splited[0][len("layer") :]) - 1)] + splited[1:]
)
else:
return s
def no_remap(s):
return s
return {"20.06.0": to_sequential_remap}.get(version, no_remap)
# }}}
__models: Dict[str, Model] = {
"resnet50": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=Bottleneck,
layers=[3, 4, 6, 3],
widths=[64, 128, 256, 512],
expansion=4,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/files/nvidia_resnet50_200821.pth.tar",
),
"resnext101-32x4d": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=Bottleneck,
layers=[3, 4, 23, 3],
widths=[128, 256, 512, 1024],
expansion=2,
cardinality=32,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/resnext101_32x4d_pyt_amp/versions/20.06.0/files/nvidia_resnext101-32x4d_200821.pth.tar",
),
"se-resnext101-32x4d": Model(
constructor=ResNet,
arch=ResNet.Arch(
stem_width=64,
block=SEBottleneck,
layers=[3, 4, 23, 3],
widths=[128, 256, 512, 1024],
expansion=2,
cardinality=32,
default_image_size=224,
),
params=ResNet.Params(),
checkpoint_url="https://api.ngc.nvidia.com/v2/models/nvidia/seresnext101_32x4d_pyt_amp/versions/20.06.0/files/nvidia_se-resnext101-32x4d_200821.pth.tar",
),
}
_ce = lambda n: EntryPoint.create(n, __models[n])
resnet50 = _ce("resnet50")
resnext101_32x4d = _ce("resnext101-32x4d")
se_resnext101_32x4d = _ce("se-resnext101-32x4d")
|
PyTorch/LanguageModeling/BERT/triton/dist6l/runner | runner | pipeline_impl | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
if [[ "${EXPORT_FORMAT}" == "trt" ]]; then
export FLAG="--fixed-batch-dim"
else
export FLAG=""
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--dataloader triton/dataloader.py \
--ignore-unknown-parameters \
--onnx-opset 13 \
${FLAG} \
\
--config-file ${CHECKPOINT_DIR}/config.json \
--checkpoint ${CHECKPOINT_DIR}/pytorch_model.bin \
--precision ${EXPORT_PRECISION} \
\
--vocab-file ${CHECKPOINT_DIR}/vocab.txt \
--max-seq-length ${MAX_SEQ_LENGTH} \
--predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \
--batch-size ${MAX_BATCH_SIZE}
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
if [ "${EXPORT_FORMAT}" != "${FORMAT}" ]; then
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--inputs input__0:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--inputs input__1:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--inputs input__2:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \
--min-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--max-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--opt-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \
--max-batch-size ${MAX_BATCH_SIZE} \
--tensorrt-max-workspace-size 8589934592 \
--atol 2 output__0=5.0 \
output__1=5.0 \
--rtol 1 output__0=5.0 \
output__1=5.0 \
| grep -v "broadcasting input1 to make tensors conform"
else
mv ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} ${SHARED_DIR}/converted_model
mv ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX}.yaml ${SHARED_DIR}/converted_model.yaml 2>/dev/null || true
fi
""",
)
)
pipeline.model_deploy(
commands=(
r"""
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
if [[ "${FORMAT}" == "trt" ]]; then
export MBS="0"
else
export MBS="${MAX_BATCH_SIZE}"
fi
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--verbose \
--load-model \
--load-model-timeout-s 100 \
\
--backend-accelerator ${ACCELERATOR} \
--tensorrt-precision ${ACCELERATOR_PRECISION} \
--max-batch-size ${MBS} \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device gpu=${TRITON_GPU_ENGINE_COUNT}
""",
)
)
pipeline.triton_prepare_performance_profiling_data(
commands=(
r"""
mkdir -p ${SHARED_DIR}/input_data
""",
r"""
python triton/prepare_input_data.py \
--dataloader triton/dataloader.py \
--input-data-dir ${SHARED_DIR}/input_data \
\
--batch-size ${MAX_BATCH_SIZE} \
--max-seq-length ${MAX_SEQ_LENGTH} \
--predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \
--vocab-file ${CHECKPOINT_DIR}/vocab.txt
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--input-shapes input__0:${MAX_SEQ_LENGTH} \
--input-shapes input__1:${MAX_SEQ_LENGTH} \
--input-shapes input__2:${MAX_SEQ_LENGTH} \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
--batching-mode static \
--evaluation-mode offline \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
|
TensorFlow/Detection/SSD/models/research/slim | slim | README | # TensorFlow-Slim image classification model library
[TF-slim](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim)
is a new lightweight high-level API of TensorFlow (`tensorflow.contrib.slim`)
for defining, training and evaluating complex
models. This directory contains
code for training and evaluating several widely used Convolutional Neural
Network (CNN) image classification models using TF-slim.
It contains scripts that will allow
you to train models from scratch or fine-tune them from pre-trained network
weights. It also contains code for downloading standard image datasets,
converting them
to TensorFlow's native TFRecord format and reading them in using TF-Slim's
data reading and queueing utilities. You can easily train any model on any of
these datasets, as we demonstrate below. We've also included a
[jupyter notebook](https://github.com/tensorflow/models/blob/master/research/slim/slim_walkthrough.ipynb),
which provides working examples of how to use TF-Slim for image classification.
For developing or modifying your own models, see also the [main TF-Slim page](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim).
## Contacts
Maintainers of TF-slim:
* Nathan Silberman,
github: [nathansilberman](https://github.com/nathansilberman)
* Sergio Guadarrama, github: [sguada](https://github.com/sguada)
## Citation
"TensorFlow-Slim image classification model library"
N. Silberman and S. Guadarrama, 2016.
https://github.com/tensorflow/models/tree/master/research/slim
## Table of contents
<a href="#Install">Installation and setup</a><br>
<a href='#Data'>Preparing the datasets</a><br>
<a href='#Pretrained'>Using pre-trained models</a><br>
<a href='#Training'>Training from scratch</a><br>
<a href='#Tuning'>Fine tuning to a new task</a><br>
<a href='#Eval'>Evaluating performance</a><br>
<a href='#Export'>Exporting Inference Graph</a><br>
<a href='#Troubleshooting'>Troubleshooting</a><br>
# Installation
<a id='Install'></a>
In this section, we describe the steps required to install the appropriate
prerequisite packages.
## Installing latest version of TF-slim
TF-Slim is available as `tf.contrib.slim` via TensorFlow 1.0. To test that your
installation is working, execute the following command; it should run without
raising any errors.
```
python -c "import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once"
```
## Installing the TF-slim image models library
To use TF-Slim for image classification, you also have to install
the [TF-Slim image models library](https://github.com/tensorflow/models/tree/master/research/slim),
which is not part of the core TF library.
To do this, check out the
[tensorflow/models](https://github.com/tensorflow/models/) repository as follows:
```bash
cd $HOME/workspace
git clone https://github.com/tensorflow/models/
```
This will put the TF-Slim image models library in `$HOME/workspace/models/research/slim`.
(It will also create a directory called
[models/inception](https://github.com/tensorflow/models/tree/master/research/inception),
which contains an older version of slim; you can safely ignore this.)
To verify that this has worked, execute the following commands; it should run
without raising any errors.
```
cd $HOME/workspace/models/research/slim
python -c "from nets import cifarnet; mynet = cifarnet.cifarnet"
```
# Preparing the datasets
<a id='Data'></a>
As part of this library, we've included scripts to download several popular
image datasets (listed below) and convert them to slim format.
Dataset | Training Set Size | Testing Set Size | Number of Classes | Comments
:------:|:---------------:|:---------------------:|:-----------:|:-----------:
Flowers|2500 | 2500 | 5 | Various sizes (source: Flickr)
[Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) | 60k| 10k | 10 |32x32 color
[MNIST](http://yann.lecun.com/exdb/mnist/)| 60k | 10k | 10 | 28x28 gray
[ImageNet](http://www.image-net.org/challenges/LSVRC/2012/)|1.2M| 50k | 1000 | Various sizes
## Downloading and converting to TFRecord format
For each dataset, we'll need to download the raw data and convert it to
TensorFlow's native
[TFRecord](https://www.tensorflow.org/versions/r0.10/api_docs/python/python_io.html#tfrecords-format-details)
format. Each TFRecord contains a
[TF-Example](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/core/example/example.proto)
protocol buffer. Below we demonstrate how to do this for the Flowers dataset.
```shell
$ DATA_DIR=/tmp/data/flowers
$ python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir="${DATA_DIR}"
```
When the script finishes you will find several TFRecord files created:
```shell
$ ls ${DATA_DIR}
flowers_train-00000-of-00005.tfrecord
...
flowers_train-00004-of-00005.tfrecord
flowers_validation-00000-of-00005.tfrecord
...
flowers_validation-00004-of-00005.tfrecord
labels.txt
```
These represent the training and validation data, sharded over 5 files each.
You will also find the `$DATA_DIR/labels.txt` file which contains the mapping
from integer labels to class names.
You can use the same script to create the mnist and cifar10 datasets.
However, for ImageNet, you have to follow the instructions
[here](https://github.com/tensorflow/models/blob/master/research/inception/README.md#getting-started).
Note that you first have to sign up for an account at image-net.org.
Also, the download can take several hours, and could use up to 500GB.
## Creating a TF-Slim Dataset Descriptor.
Once the TFRecord files have been created, you can easily define a Slim
[Dataset](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/contrib/slim/python/slim/data/dataset.py),
which stores pointers to the data file, as well as various other pieces of
metadata, such as the class labels, the train/test split, and how to parse the
TFExample protos. We have included the TF-Slim Dataset descriptors
for
[Cifar10](https://github.com/tensorflow/models/blob/master/research/slim/datasets/cifar10.py),
[ImageNet](https://github.com/tensorflow/models/blob/master/research/slim/datasets/imagenet.py),
[Flowers](https://github.com/tensorflow/models/blob/master/research/slim/datasets/flowers.py),
and
[MNIST](https://github.com/tensorflow/models/blob/master/research/slim/datasets/mnist.py).
An example of how to load data using a TF-Slim dataset descriptor using a
TF-Slim
[DatasetDataProvider](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py)
is found below:
```python
import tensorflow as tf
from datasets import flowers
slim = tf.contrib.slim
# Selects the 'validation' dataset.
dataset = flowers.get_split('validation', DATA_DIR)
# Creates a TF-Slim DataProvider which reads the dataset in the background
# during both training and testing.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])
```
## An automated script for processing ImageNet data.
Training a model with the ImageNet dataset is a common request. To facilitate
working with the ImageNet dataset, we provide an automated script for
downloading and processing the ImageNet dataset into the native TFRecord
format.
The TFRecord format consists of a set of sharded files where each entry is a serialized `tf.Example` proto. Each `tf.Example` proto contains the ImageNet image (JPEG encoded) as well as metadata such as label and bounding box information.
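For illustration, here is a minimal sketch of how a single record in one of
these shards can be inspected. It assumes the feature keys written by the
conversion script (`image/encoded` and `image/class/label`) and a shard named
`train-00000-of-01024`; adjust both if your conversion differs.
```python
import tensorflow as tf

# Read the first serialized tf.Example from one training shard (assumed name).
record_path = 'train-00000-of-01024'
serialized = next(tf.python_io.tf_record_iterator(record_path))
example = tf.train.Example.FromString(serialized)

# Pull out the JPEG-encoded image bytes and the integer label.
features = example.features.feature
label = features['image/class/label'].int64_list.value[0]
jpeg_bytes = features['image/encoded'].bytes_list.value[0]
print('label:', label, 'encoded image size:', len(jpeg_bytes), 'bytes')
```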
We provide a single [script](datasets/download_and_preprocess_imagenet.sh) for
downloading and converting ImageNet data to TFRecord format. Downloading and
preprocessing the data may take several hours (up to half a day) depending on
your network and computer speed. Please be patient.
To begin, you will need to sign up for an account with
[ImageNet](http://image-net.org) to gain access to the data. Look for the sign
up page, create an account and request an access key to download the data.
After you have `USERNAME` and `PASSWORD`, you are ready to run our script. Make
sure that your hard disk has at least 500 GB of free space for downloading and
storing the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a
location but feel free to edit accordingly.
When you run the below script, please enter *USERNAME* and *PASSWORD* when
prompted. This will occur at the very beginning. Once these values are entered,
you will not need to interact with the script again.
```shell
# location of where to place the ImageNet data
DATA_DIR=$HOME/imagenet-data
# build the preprocessing script.
bazel build slim/download_and_preprocess_imagenet
# run it
bazel-bin/slim/download_and_preprocess_imagenet "${DATA_DIR}"
```
The final line of the output script should read:
```shell
2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set.
```
When the script finishes you will find 1024 and 128 training and validation
files in the `DATA_DIR`. The files will match the patterns `train-?????-of-01024`
and `validation-?????-of-00128`, respectively.
[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now
ready to train or evaluate with the ImageNet data set.
# Pre-trained Models
<a id='Pretrained'></a>
Neural nets work best when they have many parameters, making them powerful
function approximators.
However, this means they must be trained on very large datasets. Because
training models from scratch can be a very computationally intensive process
requiring days or even weeks, we provide various pre-trained models,
as listed below. These CNNs have been trained on the
[ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/)
image classification dataset.
In the table below, we list each model, the corresponding
TensorFlow model file, the link to the model checkpoint, and the top 1 and top 5
accuracy (on the imagenet test set).
Note that the VGG and ResNet V1 parameters have been converted from their original
caffe formats
([here](https://github.com/BVLC/caffe/wiki/Model-Zoo#models-used-by-the-vgg-team-in-ilsvrc-2014)
and
[here](https://github.com/KaimingHe/deep-residual-networks)),
whereas the Inception and ResNet V2 parameters have been trained internally at
Google. Also be aware that these accuracies were computed by evaluating using a
single image crop. Some academic papers report higher accuracy by using multiple
crops at multiple scales.
Model | TF-Slim File | Checkpoint | Top-1 Accuracy| Top-5 Accuracy |
:----:|:------------:|:----------:|:-------:|:--------:|
[Inception V1](http://arxiv.org/abs/1409.4842v1)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py)|[inception_v1_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz)|69.8|89.6|
[Inception V2](http://arxiv.org/abs/1502.03167)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v2.py)|[inception_v2_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v2_2016_08_28.tar.gz)|73.9|91.8|
[Inception V3](http://arxiv.org/abs/1512.00567)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v3.py)|[inception_v3_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)|78.0|93.9|
[Inception V4](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v4.py)|[inception_v4_2016_09_09.tar.gz](http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz)|80.2|95.2|
[Inception-ResNet-v2](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py)|[inception_resnet_v2_2016_08_30.tar.gz](http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz)|80.4|95.3|
[ResNet V1 50](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_50_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz)|75.2|92.2|
[ResNet V1 101](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_101_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz)|76.4|92.9|
[ResNet V1 152](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_152_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz)|76.8|93.2|
[ResNet V2 50](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_50_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz)|75.6|92.8|
[ResNet V2 101](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_101_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_101_2017_04_14.tar.gz)|77.0|93.7|
[ResNet V2 152](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_152_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_152_2017_04_14.tar.gz)|77.8|94.1|
[ResNet V2 200](https://arxiv.org/abs/1603.05027)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[TBA]()|79.9\*|95.2\*|
[VGG 16](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_16_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz)|71.5|89.8|
[VGG 19](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_19_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz)|71.1|89.8|
[MobileNet_v1_1.0_224](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_1.0_224.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz)|70.9|89.9|
[MobileNet_v1_0.50_160](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.50_160.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz)|59.1|81.9|
[MobileNet_v1_0.25_128](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.25_128.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz)|41.5|66.3|
[MobileNet_v2_1.4_224^*](https://arxiv.org/abs/1801.04381)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py)| [mobilenet_v2_1.4_224.tgz](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz) | 74.9 | 92.5|
[MobileNet_v2_1.0_224^*](https://arxiv.org/abs/1801.04381)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py)| [mobilenet_v2_1.0_224.tgz](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz) | 71.9 | 91.0
[NASNet-A_Mobile_224](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_mobile_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10_2017.tar.gz)|74.0|91.6|
[NASNet-A_Large_331](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_large_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz)|82.7|96.2|
[PNASNet-5_Large_331](https://arxiv.org/abs/1712.00559)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/pnasnet.py)|[pnasnet-5_large_2017_12_13.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/pnasnet-5_large_2017_12_13.tar.gz)|82.9|96.2|
[PNASNet-5_Mobile_224](https://arxiv.org/abs/1712.00559)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/pnasnet.py)|[pnasnet-5_mobile_2017_12_13.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/pnasnet-5_mobile_2017_12_13.tar.gz)|74.2|91.9|
^ ResNet V2 models use Inception pre-processing and input image size of 299 (use
`--preprocessing_name inception --eval_image_size 299` when using
`eval_image_classifier.py`). Performance numbers for ResNet V2 models are
reported on the ImageNet validation set.
(#) More information and details about the NASNet architectures are available at this [README](nets/nasnet/README.md)
All 16 float MobileNet V1 models reported in the [MobileNet Paper](https://arxiv.org/abs/1704.04861) and all
16 quantized [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/) compatible MobileNet V1 models can be found
[here](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet_v1.md).
(^#) More details on MobileNetV2 models can be found [here](nets/mobilenet/README.md).
(\*): Results quoted from the [paper](https://arxiv.org/abs/1603.05027).
Here is an example of how to download the Inception V3 checkpoint:
```shell
$ CHECKPOINT_DIR=/tmp/checkpoints
$ mkdir ${CHECKPOINT_DIR}
$ wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz
$ tar -xvf inception_v3_2016_08_28.tar.gz
$ mv inception_v3.ckpt ${CHECKPOINT_DIR}
$ rm inception_v3_2016_08_28.tar.gz
```
# Training a model from scratch.
<a id='Training'></a>
We provide an easy way to train a model from scratch using any TF-Slim dataset.
The following example demonstrates how to train Inception V3 using the default
parameters on the ImageNet dataset.
```shell
DATASET_DIR=/tmp/imagenet
TRAIN_DIR=/tmp/train_logs
python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_name=imagenet \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v3
```
This process may take several days, depending on your hardware setup.
For convenience, we provide a way to train a model on multiple GPUs,
and/or multiple CPUs, either synchronously or asynchronously.
See [model_deploy](https://github.com/tensorflow/models/blob/master/research/slim/deployment/model_deploy.py)
for details.
### TensorBoard
To visualize the losses and other metrics during training, you can use
[TensorBoard](https://github.com/tensorflow/tensorboard)
by running the command below.
```shell
tensorboard --logdir=${TRAIN_DIR}
```
Once TensorBoard is running, navigate your web browser to http://localhost:6006.
# Fine-tuning a model from an existing checkpoint
<a id='Tuning'></a>
Rather than training from scratch, we'll often want to start from a pre-trained
model and fine-tune it.
To indicate a checkpoint from which to fine-tune, we'll call training with
the `--checkpoint_path` flag and assign it an absolute path to a checkpoint
file.
When fine-tuning a model, we need to be careful about restoring checkpoint
weights. In particular, when we fine-tune a model on a new task with a different
number of output labels, we won't be able to restore the final logits (classifier)
layer. For this, we'll use the `--checkpoint_exclude_scopes` flag. This flag
prevents certain variables from being loaded. When fine-tuning on a
classification task using a different number of classes than the trained model,
the new model will have a final 'logits' layer whose dimensions differ from the
pre-trained model. For example, if fine-tuning an ImageNet-trained model on
Flowers, the pre-trained logits layer will have dimensions `[2048 x 1001]` but
our new logits layer will have dimensions `[2048 x 5]`. Consequently, this
flag indicates to TF-Slim to avoid loading these weights from the checkpoint.
Keep in mind that warm-starting from a checkpoint affects the model's weights
only during the initialization of the model. Once a model has started training,
a new checkpoint will be created in `${TRAIN_DIR}`. If the fine-tuning
training is stopped and restarted, this new checkpoint will be the one from
which weights are restored and not the `${CHECKPOINT_PATH}`. Consequently,
the flags `--checkpoint_path` and `--checkpoint_exclude_scopes` are only used
during the `0`-th global step (model initialization). Typically, for fine-tuning
one only wants to train a subset of layers, so the `--trainable_scopes` flag
specifies which subset of layers should be trained; the rest remain frozen.
Below we give an example of
[fine-tuning inception-v3 on flowers](https://github.com/tensorflow/models/blob/master/research/slim/scripts/finetune_inception_v3_on_flowers.sh).
Inception V3 was trained on ImageNet with 1000 class labels, but the Flowers
dataset only has 5 classes. Since the dataset is quite small, we will only train
the new layers.
```shell
$ DATASET_DIR=/tmp/flowers
$ TRAIN_DIR=/tmp/flowers-models/inception_v3
$ CHECKPOINT_PATH=/tmp/my_checkpoints/inception_v3.ckpt
$ python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_dir=${DATASET_DIR} \
--dataset_name=flowers \
--dataset_split_name=train \
--model_name=inception_v3 \
--checkpoint_path=${CHECKPOINT_PATH} \
--checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
--trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits
```
# Evaluating performance of a model
<a id='Eval'></a>
To evaluate the performance of a model (whether pretrained or your own),
you can use the `eval_image_classifier.py` script, as shown below.
The example below evaluates the pretrained Inception V3 checkpoint on the
ImageNet validation set.
```shell
$ CHECKPOINT_FILE=${CHECKPOINT_DIR}/inception_v3.ckpt  # Example
$ python eval_image_classifier.py \
--alsologtostderr \
--checkpoint_path=${CHECKPOINT_FILE} \
--dataset_dir=${DATASET_DIR} \
--dataset_name=imagenet \
--dataset_split_name=validation \
--model_name=inception_v3
```
See the [evaluation module example](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim#evaluation-loop)
for an example of how to evaluate a model at multiple checkpoints during or after the training.
# Exporting the Inference Graph
<a id='Export'></a>
The `export_inference_graph.py` script saves out a GraphDef containing the
architecture of the model. To use it with a model name defined by slim, run:
```shell
$ python export_inference_graph.py \
--alsologtostderr \
--model_name=inception_v3 \
--output_file=/tmp/inception_v3_inf_graph.pb
$ python export_inference_graph.py \
--alsologtostderr \
--model_name=mobilenet_v1 \
--image_size=224 \
--output_file=/tmp/mobilenet_v1_224.pb
```
## Freezing the exported Graph
If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:
```shell
bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1
```
The output node names will vary depending on the model, but you can inspect and
determine them using the summarize_graph tool:
```shell
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb
```
## Run label image in C++
To run the resulting graph in C++, you can look at the label_image sample code:
```shell
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255
```
# Troubleshooting
<a id='Troubleshooting'></a>
#### The model runs out of CPU memory.
See
[Model Runs out of CPU memory](https://github.com/tensorflow/models/tree/master/research/inception#the-model-runs-out-of-cpu-memory).
#### The model runs out of GPU memory.
See
[Adjusting Memory Demands](https://github.com/tensorflow/models/tree/master/research/inception#adjusting-memory-demands).
#### The model training results in NaNs.
See
[Model Resulting in NaNs](https://github.com/tensorflow/models/tree/master/research/inception#the-model-training-results-in-nans).
#### The ResNet and VGG Models have 1000 classes but the ImageNet dataset has 1001
The ImageNet dataset provided has an empty background class which can be used
to fine-tune the model to other tasks. If you try training or fine-tuning the
VGG or ResNet models using the ImageNet dataset, you might encounter the
following error:
```bash
InvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [1001] rhs shape= [1000]
```
This is due to the fact that the VGG and ResNet V1 final layers have only 1000
outputs rather than 1001.
To fix this issue, you can set the `--labels_offset=1` flag. This results in
the ImageNet labels being shifted down by one.
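For example, a minimal sketch of fine-tuning with the offset applied might look
like the following (the ResNet V1 50 model name and the environment variables are
placeholders chosen for illustration, not values prescribed by this guide); pass
the same `--labels_offset=1` flag to eval_image_classifier.py when evaluating the
resulting checkpoint:
```shell
$ python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_dir=${DATASET_DIR} \
--dataset_name=imagenet \
--dataset_split_name=train \
--model_name=resnet_v1_50 \
--labels_offset=1
```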
#### I wish to train a model with a different image size.
The preprocessing functions all take `height` and `width` as parameters. You
can change the default values using the following snippet:
```python
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
height=MY_NEW_HEIGHT,
width=MY_NEW_WIDTH,
is_training=True)
```
#### What hardware specification are these hyper-parameters targeted for?
See
[Hardware Specifications](https://github.com/tensorflow/models/tree/master/research/inception#what-hardware-specification-are-these-hyper-parameters-targeted-for).
|
TensorFlow/Detection/SSD/models/research/object_detection/legacy | legacy | train | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow as tf
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.legacy import trainer
from object_detection.utils import config_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string('train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
@tf.contrib.framework.deprecated(None, 'Use object_detection/model_main.py.')
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
overwrite=True)
model_config = configs['model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
def get_next(config):
return dataset_builder.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
# Number of total worker replicas include "worker"s and the "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=True)
trainer.train(
create_input_dict_fn,
model_fn,
train_config,
master,
task,
FLAGS.num_clones,
worker_replicas,
FLAGS.clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
FLAGS.train_dir,
graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
tf.app.run()
|
PyTorch/Translation/GNMT/scripts/docker | docker | build | #!/bin/bash
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
docker build . --network=host --rm -t gnmt:latest
|
TensorFlow/Segmentation/UNet_Medical/examples | examples | unet_INFER_TF-TRT | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net inference with TF-TRT and XLA on 1 GPU
# Usage ./unet_INFER_TF-TRT.sh <path to this repository> <path to dataset> <path to results directory> <batch size>
python $1/main.py --data_dir $2 --model_dir $3 --batch_size $4 --exec_mode predict --use_trt --xla
|
TensorFlow2/LanguageModeling/BERT | BERT | tf_trt | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.compat.v1.saved_model import tag_constants, signature_constants
def export_model(model_dir, prec, tf_trt_model_dir=None):
model = tf.saved_model.load(model_dir)
input_shape = [1, 384]
dummy_input = tf.constant(tf.zeros(input_shape, dtype=tf.int32))
x = [
tf.constant(dummy_input, name='input_word_ids'),
tf.constant(dummy_input, name='input_mask'),
tf.constant(dummy_input, name='input_type_ids'),
]
_ = model(x)
trt_prec = trt.TrtPrecisionMode.FP32 if prec == "fp32" else trt.TrtPrecisionMode.FP16
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=model_dir,
conversion_params=trt.TrtConversionParams(precision_mode=trt_prec),
)
converter.convert()
tf_trt_model_dir = tf_trt_model_dir or f'/tmp/tf-trt_model_{prec}'
converter.save(tf_trt_model_dir)
print(f"TF-TRT model saved at {tf_trt_model_dir}")
class SavedModel:
def __init__(self, model_dir, precision):
self.saved_model_loaded = tf.saved_model.load(model_dir, tags=[tag_constants.SERVING])
self.graph_func = self.saved_model_loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.precision = tf.float16 if precision == "amp" else tf.float32
def __call__(self, x, **kwargs):
return self.infer_step(x)
@tf.function
def infer_step(self, x):
output = self.graph_func(**x)
return output['start_positions'], output['end_positions']
class TFTRTModel:
def __init__(self, model_dir, precision):
temp_tftrt_dir = f"/tmp/tf-trt_model_{precision}"
export_model(model_dir, precision, temp_tftrt_dir)
saved_model_loaded = tf.saved_model.load(temp_tftrt_dir, tags=[tag_constants.SERVING])
print(f"TF-TRT model loaded from {temp_tftrt_dir}")
self.graph_func = saved_model_loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.precision = tf.float16 if precision == "amp" else tf.float32
def __call__(self, x, **kwargs):
return self.infer_step(x)
@tf.function
def infer_step(self, x):
output = self.graph_func(**x)
return output['start_positions'], output['end_positions']
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils | utils | collect_env | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
return "\n Pillow ({})".format(PIL.__version__)
def collect_env_info():
env_str = get_pretty_env_info()
env_str += get_pil_version()
return env_str
|
TensorFlow2/LanguageModeling/BERT/data | data | __init__ | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
TensorFlow2/Recommendation/SIM/sim/layers | layers | rnn | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class VecAttGRUCell(tf.keras.layers.Layer):
"""
Modification of Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
units: int, The number of units in the GRU cell.
"""
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
self._activation = tf.math.tanh
self._gate_linear = tf.keras.layers.Dense(
2 * self.units,
bias_initializer=tf.constant_initializer(1.0),
kernel_initializer=None,
)
self._candidate_linear = tf.keras.layers.Dense(
self.units,
bias_initializer=tf.constant_initializer(0.0),
kernel_initializer=None,
)
super(VecAttGRUCell, self).__init__(**kwargs)
def call(self, inputs_attscore, states):
"""Gated recurrent unit (GRU) with nunits cells."""
inputs, att_score = inputs_attscore
state = states[0]
value = tf.math.sigmoid(self._gate_linear(tf.concat([inputs, state], axis=-1)))
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
c = self._activation(
self._candidate_linear(tf.concat([inputs, r_state], axis=-1))
)
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, [new_h]
class AUGRU(tf.keras.layers.Layer):
def __init__(self, num_units=None, return_sequence=True, **kwargs):
self.num_units = num_units
self.return_sequence = return_sequence
super(AUGRU, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.internal_rnn = tf.keras.layers.RNN(VecAttGRUCell(self.num_units))
# Be sure to call this somewhere!
super(AUGRU, self).build(input_shape)
    def call(self, input_list):
        """
        :param input_list: tuple of (inputs, att_scores) forwarded to the internal VecAttGRUCell RNN
        :return: the final hidden state, shape (batch_size, num_units)
        """
return self.internal_rnn(tuple(input_list))
|
Kaldi/SpeechRecognition/notebooks | notebooks | Kaldi_TRTIS_inference_offline_demo | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # Kaldi TRTIS Inference Offline Demo
# ## Overview
#
#
# This repository provides a wrapper around the online GPU-accelerated ASR pipeline from the paper [GPU-Accelerated Viterbi Exact Lattice Decoder for Batched Online and Offline Speech Recognition](https://arxiv.org/abs/1910.10032). That work includes a high-performance implementation of a GPU HMM Decoder, a low-latency Neural Net driver, fast Feature Extraction for preprocessing, and new ASR pipelines tailored for GPUs. These different modules have been integrated into the Kaldi ASR framework.
#
# This repository contains a TensorRT Inference Server custom backend for the Kaldi ASR framework. This custom backend calls the high-performance online GPU pipeline from the Kaldi ASR framework. This TensorRT Inference Server integration provides ease-of-use to Kaldi ASR inference: gRPC streaming server, dynamic sequence batching, and multi-instances support. A client connects to the gRPC server, streams audio by sending chunks to the server, and gets back the inferred text as an answer. More information about the TensorRT Inference Server can be found [here](https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-guide/docs/).
#
#
#
# ### Learning objectives
#
# This notebook demonstrates the steps for carrying out inferencing with the Kaldi TRTIS backend server using a Python gRPC client in an offline context, that is, we will stream pre-recorded .wav files to the inference server and receive the results back.
#
# ## Content
# 1. [Pre-requisite](#1)
# 1. [Setup](#2)
# 1. [Audio helper classes](#3)
# 1. [Inference](#4)
#
# <a id="1"></a>
# ## 1. Pre-requisite
#
# ### 1.1 Docker containers
# Follow the steps in [README](README.md) to build Kaldi server and client containers.
#
# ### 1.2 Hardware
# This notebook can be executed on any CUDA-enabled NVIDIA GPU, although for efficient mixed precision inference, a [Tensor Core NVIDIA GPU](https://www.nvidia.com/en-us/data-center/tensorcore/) is desired (Volta, Turing or newer architectures).
# In[2]:
get_ipython().system('nvidia-smi')
# ### 1.3 Data download and preprocessing
#
# The script `scripts/docker/launch_download.sh` will download the LibriSpeech test dataset along with Kaldi ASR models.
# In[3]:
get_ipython().system('ls /Kaldi/data/data/LibriSpeech')
# Within the docker container, the final data and model directory should look like:
#
# ```
# /Kaldi/data
# data
# datasets
# models
# ```
# <a id="2"></a>
# ## 2. Setup
# ### Import libraries and parameters
# In[4]:
import argparse
import numpy as np
import os
from builtins import range
from functools import partial
import librosa
import soundfile
import pyaudio as pa
import subprocess
import grpc
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
import tensorrtserver.api.model_config_pb2 as model_config
# In[5]:
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='Path for input .wav file')
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-a', '--async', dest="async_set", action="store_true", required=False,
default=False, help='Use asynchronous inference API')
parser.add_argument('--streaming', action="store_true", required=False, default=False,
help='Use streaming inference API')
parser.add_argument('-m', '--model-name', type=str, required=False, default='kaldi_online' ,
help='Name of model')
parser.add_argument('-x', '--model-version', type=int, required=False, default=1,
help='Version of model. Default is to use latest version.')
parser.add_argument('-b', '--batch-size', type=int, required=False, default=1,
help='Batch size. Default is 1.')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8001',
help='Inference server URL. Default is localhost:8001.')
FLAGS = parser.parse_args()
# ### Checking server status
#
# We first query the status of the server. The target model is 'kaldi_online'. A successful deployment of the server should result in output similar to the following.
#
# ```
# request_status {
# code: SUCCESS
# server_id: "inference:0"
# request_id: 17514
# }
# server_status {
# id: "inference:0"
# version: "1.9.0"
# uptime_ns: 14179155408971
# model_status {
# key: "kaldi_online"
# ...
# ```
# In[6]:
# Create gRPC stub for communicating with the server
channel = grpc.insecure_channel(FLAGS.url)
grpc_stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)
# Prepare request for Status gRPC
request = grpc_service_pb2.StatusRequest(model_name=FLAGS.model_name)
# Call and receive response from Status gRPC
response = grpc_stub.Status(request)
print(response)
# <a id="3"></a>
# ## 3. Audio helper classes
# Next, we define some helper classes for pre-processing audio from files. The below AudioSegment class reads audio data from .wav files and converts the sampling rate to that required by the Kaldi ASR model, which is 16000Hz by default.
#
# Note: For historical reasons, Kaldi expects waveforms in the range (2^15-1)x[-1, 1], not the usual default DSP range [-1, 1]. Therefore, we scale the audio signal by a factor of (2^15-1).
# In[7]:
WAV_SCALE_FACTOR = 2**15-1
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, target_sr=16000, trim=False,
trim_db=60):
"""Create audio segment from samples.
        Samples are converted to float32 internally, with integers scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.core.resample(samples, sample_rate, target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / ((2 ** (bits - 1)) - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return WAV_SCALE_FACTOR * float32_samples
@classmethod
def from_file(cls, filename, target_sr=16000, offset=0, duration=0,
min_duration=0, trim=False):
"""
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: numpy array of samples
"""
        with soundfile.SoundFile(filename, 'r') as f:
dtype_options = {'PCM_16': 'int16', 'PCM_32': 'int32', 'FLOAT': 'float32'}
dtype_file = f.subtype
if dtype_file in dtype_options:
dtype = dtype_options[dtype_file]
else:
dtype = 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
num_zero_pad = int(target_sr * min_duration - samples.shape[0])
if num_zero_pad > 0:
samples = np.pad(samples, [0, num_zero_pad], mode='constant')
samples = samples.transpose()
return cls(samples, sample_rate, target_sr=target_sr, trim=trim)
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
# In[8]:
# read audio chunk from a file
def get_audio_chunk_from_soundfile(sf, chunk_size):
dtype_options = {'PCM_16': 'int16', 'PCM_32': 'int32', 'FLOAT': 'float32'}
dtype_file = sf.subtype
if dtype_file in dtype_options:
dtype = dtype_options[dtype_file]
else:
dtype = 'float32'
audio_signal = sf.read(chunk_size, dtype=dtype)
end = False
# pad to chunk size
if len(audio_signal) < chunk_size:
end = True
audio_signal = np.pad(audio_signal, (0, chunk_size-len(
audio_signal)), mode='constant')
return audio_signal, end
# generator that returns chunks of audio data from file
def audio_generator_from_file(input_filename, target_sr, chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
while not end:
audio_signal, end = get_audio_chunk_from_soundfile(sf, chunk_size)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
yield audio_segment.samples, target_sr, start, end
start = False
sf.close()
# ### Loading data
#
# We load and play a wave file from the LibriSpeech data set. The LibriSpeech data set is organized into directories and subdirectories containing speech segments and transcripts for different speakers.
# In[9]:
get_ipython().system('ls /Kaldi/data/data/LibriSpeech/test-clean/1089/134686/')
# In[10]:
DIR = "1089"
SUBDIR = "134686"
FILE_ID = "0000"
FILE_NAME = "/Kaldi/data/data/LibriSpeech/test-clean/%s/%s/%s-%s-%s.wav"%(DIR, SUBDIR, DIR, SUBDIR, FILE_ID)
TRANSRIPTION_FILE = "/Kaldi/data/data/LibriSpeech/test-clean/%s/%s/%s-%s.trans.txt"%(DIR, SUBDIR, DIR, SUBDIR)
batcmd = "cat %s|grep %s"%(TRANSRIPTION_FILE, FILE_ID)
res = subprocess.check_output(batcmd, shell=True)
transcript = " ".join(res.decode('utf-8').split(" ")[1:]).lower()
print(transcript)
import IPython.display as ipd
ipd.Audio(FILE_NAME)
# Next, we define a helper function which generates pairs of file path and transcript from a LibriSpeech data directory.
# In[11]:
def libri_generator(DATASET_ROOT):
for subdir in os.listdir(DATASET_ROOT):
SUBDIR = os.path.join(DATASET_ROOT, subdir)
if os.path.isdir(os.path.join(DATASET_ROOT, subdir)):
for subsubdir in os.listdir(SUBDIR):
SUBSUBDIR = os.path.join(SUBDIR, subsubdir)
#print(os.listdir(SUBSUBDIR))
transcription_file = os.path.join(DATASET_ROOT, SUBDIR, SUBSUBDIR, "%s-%s.trans.txt"%(subdir, subsubdir))
transcriptions = {}
#pdb.set_trace()
with open(transcription_file, "r") as f:
for line in f:
fields = line.split(" ")
transcriptions[fields[0]] = " ".join(fields[1:])
for file_key, transcript in transcriptions.items():
file_path = os.path.join(DATASET_ROOT, SUBDIR, SUBSUBDIR, file_key+'.wav')
yield file_path, transcript.strip().lower()
# In[12]:
datagen = libri_generator("/Kaldi/data/data/LibriSpeech/test-clean/")
filepath, transcript = next(datagen)
# In[13]:
print(transcript)
import IPython.display as ipd
ipd.Audio(filepath)
# <a id="4"></a>
# ## 4. Inference
#
# We first create an inference context object that connects to the Kaldi TRTIS server via a gRPC connection.
#
# The server expects chunks of audio each containing up to input.WAV_DATA.dims samples (default: 8160). By default, this corresponds to 510 ms of audio per chunk (i.e. a 16000 Hz sampling rate). The last chunk can be smaller than this maximum value.
# In[14]:
from tensorrtserver.api import *
protocol = ProtocolType.from_str("grpc")
CORRELATION_ID = 1101
ctx = InferContext(FLAGS.url, protocol, FLAGS.model_name, FLAGS.model_version,
correlation_id=CORRELATION_ID, verbose=True,
streaming=False)
# Next, we take chunks from a selected audio file (each 510ms in duration, containing 8160 samples) and stream them sequentially to the Kaldi server. The server processes each chunk as soon as it is received. The transcription result is returned upon receiving the final chunk.
#
# In the following, you can either specify a specific .wav file or take a file via the Kaldi dataset generator.
# In[15]:
## Take a specific file
DIR = "1089"
SUBDIR = "134686"
FILE_ID = "0000"
FILE_NAME = "/Kaldi/data/data/LibriSpeech/test-clean/%s/%s/%s-%s-%s.wav"%(DIR, SUBDIR, DIR, SUBDIR, FILE_ID)
TRANSRIPTION_FILE = "/Kaldi/data/data/LibriSpeech/test-clean/%s/%s/%s-%s.trans.txt"%(DIR, SUBDIR, DIR, SUBDIR)
batcmd = "cat %s|grep %s"%(TRANSRIPTION_FILE, FILE_ID)
res = subprocess.check_output(batcmd, shell=True)
transcript = " ".join(res.decode('utf-8').split(" ")[1:]).lower()
## Alternatively, take a file from the data generator
#FILE_NAME, transcript = next(datagen)
cnt = 0
for audio_chunk in audio_generator_from_file(FILE_NAME, 16000, 0.51):
print("Chunk ", cnt, audio_chunk[0].shape, audio_chunk[1], audio_chunk[2], audio_chunk[3])
cnt += 1
flags = InferRequestHeader.FLAG_NONE
if audio_chunk[2]:
flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
if audio_chunk[3]:
flags = flags | InferRequestHeader.FLAG_SEQUENCE_END
if not audio_chunk[3]: # if not end of sequence
ctx.run({'WAV_DATA' : (audio_chunk[0],),
'WAV_DATA_DIM' : (np.full(shape=1, fill_value=len(audio_chunk[0]), dtype=np.int32),)
},
{},
batch_size=1,
flags=flags,
corr_id=CORRELATION_ID)
else:
result = ctx.run({'WAV_DATA' : (audio_chunk[0],),
'WAV_DATA_DIM' : (np.full(shape=1, fill_value=len(audio_chunk[0]), dtype=np.int32),)
},
{ 'TEXT' : InferContext.ResultFormat.RAW},
batch_size=1,
flags=flags,
corr_id=CORRELATION_ID)
print("ASR output: %s" % "".join([c.decode('utf-8') for c in result['TEXT'][0]]).lower())
if transcript:
print("Ground truth: %s"%transcript.lower())
# # Conclusion
#
# In this notebook, we have walked through the complete process of preparing the audio data and carrying out inference with the Kaldi ASR model.
#
# ## What's next
# Now it's time to try the Kaldi ASR model on your own data.
#
# In[ ]:
|
TensorFlow2/Detection/Efficientdet/efficientnet/layers | layers | activations | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import math
import tensorflow as tf
__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation']
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
"""Computes the Swish activation function.
The tf.nn.swish operation uses a custom gradient to reduce memory usage.
Since saving custom gradients in SavedModel is currently not supported, and
one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper that allows selecting between the native TensorFlow
  swish operation and a customized operation that uses the default TensorFlow
  gradient computation.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.sigmoid(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
"""Computes a hard version of the swish function.
This operation can be used to reduce computational cost and improve
quantization for edge devices.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
"""Computes the identity function.
Useful for helping in quantization.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return tf.identity(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
"""Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
It checks string first and if it is one of customized activation not in TF,
the corresponding activation will be returned. For non-customized activation
names and callable identifiers, always fallback to tf.keras.activations.get.
Args:
identifier: String name of the activation function or callable.
Returns:
A Python function corresponding to the activation function.
"""
if isinstance(identifier, six.string_types):
name_to_fn = {
"gelu": gelu,
"simple_swish": simple_swish,
"hard_swish": hard_swish,
"identity": identity,
}
identifier = str(identifier).lower()
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier) |
TensorFlow2/LanguageModeling/ELECTRA/scripts | scripts | run_pretraining | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_p1=${1:-176}
learning_rate_p1=${2:-"6e-3"}
precision=${3:-"amp"}
num_gpus=${4:-8}
xla=${5:-"xla"}
warmup_steps_p1=${6:-"2000"}
train_steps_p1=${7:-10000}
save_checkpoint_steps=${8:-500}
resume_training=${9:-"false"}
optimizer=${10:-"lamb"}
accumulate_gradients=${11:-"true"}
gradient_accumulation_steps_p1=${12:-48}
seed=${13:-12439}
job_name=${14:-"electra_lamb_pretraining"}
train_batch_size_p2=${15:-24}
learning_rate_p2=${16:-"4e-3"}
warmup_steps_p2=${17:-"200"}
train_steps_p2=${18:-933}
gradient_accumulation_steps_p2=${19:-144}
ELECTRA_MODEL=${20:-"base"}
DATASET_P1="tfrecord_lower_case_1_seq_len_128_random_seed_12345/books_wiki_en_corpus/train/pretrain_data*" # change this for other datasets
DATA_DIR_P1=${21:-"$DATA_PREP_WORKING_DIR/$DATASET_P1"}
DATASET_P2="tfrecord_lower_case_1_seq_len_512_random_seed_12345/books_wiki_en_corpus/train/pretrain_data*" # change this for other datasets
DATA_DIR_P2=${22:-"$DATA_PREP_WORKING_DIR/$DATASET_P2"}
CODEDIR=${23:-"/workspace/electra"}
init_checkpoint=${24:-"None"}
restore_checkpoint=${restore_checkpoint:-"true"}
RESULTS_DIR=$CODEDIR/results
if [ ! -d "$RESULTS_DIR" ] ; then
echo "Error! $RESULTS_DIR directory missing."
exit -1
fi
PREFIX=""
TEST_RESULT=$(awk 'BEGIN {print ('1' <= '${num_gpus}')}')
if [ "$TEST_RESULT" == 1 ] ; then
PREFIX="horovodrun -np $num_gpus "
fi
if [ "$precision" = "amp" ] ; then
PREC="--amp "
elif [ "$precision" = "fp32" ] ; then
PREC=""
elif [ "$precision" = "tf32" ] ; then
PREC=""
else
echo "Unknown <precision> argument"
exit -2
fi
if [ "$xla" = "xla" ] ; then
PREC="$PREC --xla"
fi
ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps_p1"
fi
CHECKPOINT=""
if [ "$resume_training" == "true" ] ; then
CHECKPOINT="--restore_checkpoint=latest"
fi
if [ "$init_checkpoint" != "None" ] ; then
CHECKPOINT="--restore_checkpoint=$init_checkpoint"
fi
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --model_name=${ELECTRA_MODEL}"
CMD+=" --pretrain_tfrecords=$DATA_DIR_P1"
CMD+=" --model_size=${ELECTRA_MODEL}"
CMD+=" --train_batch_size=$train_batch_size_p1"
CMD+=" --max_seq_length=128 --disc_weight=50.0 --generator_hidden_size=0.3333333 "
CMD+=" --num_train_steps=$train_steps_p1"
CMD+=" --num_warmup_steps=$warmup_steps_p1"
CMD+=" --save_checkpoints_steps=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate_p1"
CMD+=" --optimizer=${optimizer} --skip_adaptive --opt_beta_1=0.878 --opt_beta_2=0.974 --lr_decay_power=0.5"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" --log_dir ${RESULTS_DIR} "
CMD="$PREFIX python3 $CMD"
echo "Launch command: $CMD"
printf -v TAG "electra_pretraining_phase1_%s" "$precision"
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
set -x
if [ -z "$LOGFILE" ] ; then
$CMD
else
(
$CMD
) |& tee $LOGFILE
fi
set +x
echo "finished pretraining phase1"
#Start Phase2
ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps_p2"
fi
RESTORE_CHECKPOINT=""
if [ "$restore_checkpoint" == "true" ] ; then
RESTORE_CHECKPOINT="--restore_checkpoint=latest --phase2"
fi
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --model_name=${ELECTRA_MODEL}"
CMD+=" --pretrain_tfrecords=$DATA_DIR_P2"
CMD+=" --model_size=${ELECTRA_MODEL}"
CMD+=" --train_batch_size=$train_batch_size_p2"
CMD+=" --max_seq_length=512 --disc_weight=50.0 --generator_hidden_size=0.3333333 ${RESTORE_CHECKPOINT}"
CMD+=" --num_train_steps=$train_steps_p2"
CMD+=" --num_warmup_steps=$warmup_steps_p2"
CMD+=" --save_checkpoints_steps=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate_p2"
CMD+=" --optimizer=${optimizer} --skip_adaptive --opt_beta_1=0.878 --opt_beta_2=0.974 --lr_decay_power=0.5"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" --log_dir ${RESULTS_DIR} "
CMD="$PREFIX python3 $CMD"
echo "Launch command: $CMD"
printf -v TAG "electra_pretraining_phase2_%s" "$precision"
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
set -x
if [ -z "$LOGFILE" ] ; then
$CMD
else
(
$CMD
) |& tee $LOGFILE
fi
set +x
echo "finished pretraining phase2"
|
TensorFlow/Detection/SSD/configs | configs | ssd320_full_8gpus | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal
# loss (a.k.a Retinanet).
# See Lin et al, https://arxiv.org/abs/1708.02002
# Trained on COCO, initialized from Imagenet classification checkpoint
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: true
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: [1.0, 2.0, 0.5]
scales_per_octave: 2
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 256
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
num_layers_before_predictor: 4
kernel_size: 3
}
}
feature_extractor {
type: 'ssd_resnet50_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "/checkpoints/resnet_v1_50/model.ckpt"
fine_tune_checkpoint_type: "classification"
batch_size: 32
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 12500
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_object_covered: 0.0
min_aspect_ratio: 0.75
max_aspect_ratio: 3.0
min_area: 0.75
max_area: 1.0
overlap_thresh: 0.0
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .16000000000000000000
total_steps: 12500
warmup_learning_rate: .06933120000000000000
warmup_steps: 1000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "/data/coco2017_tfrecords/*train*"
}
label_map_path: "object_detection/data/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "/data/coco2017_tfrecords/*val*"
}
label_map_path: "object_detection/data/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/cc/kernels/launchers | launchers | dot_based_interact_fp32_launcher | // Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef FP32_LAUNCHER_CU
#define FP32_LAUNCHER_CU
#include "../cuda_kernels/dot_based_interact_fp32.cu"
inline void dotBasedInteractFP32Fwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
// Output
uint interaction_output_size = (num_rows * (num_rows - 1)) >> 1;
uint output_size = ((interaction_output_size+num_cols-1)/8 + 1)*8; //round up to multiple of 8
// Input
uint input_size = num_rows * num_cols;
uint shared_mem_size_elems = input_size;
uint shared_mem_size_bytes = shared_mem_size_elems << 2; // F32 Kernel
bool float4_predicate = !((num_cols & 3) || (output_size & 3));
if (float4_predicate) {
dotBasedInteractF32FwdKernel<kNumThreads>
<<<num_blocks, kNumThreads, shared_mem_size_bytes, stream>>>((const float *)input,
(float *)output,
batch_size,
num_rows,
num_cols,
input_size,
output_size,
interaction_output_size);
} else {
dotBasedInteractF32FwdKernelNonAligned<kNumThreads>
<<<num_blocks, kNumThreads, shared_mem_size_bytes, stream>>>((const float *)input,
(float *)output,
batch_size,
num_rows,
num_cols,
input_size,
output_size,
interaction_output_size);
}
}
inline void dotBasedInteractFP32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
uint input_size = num_rows * num_cols;
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; //this IS supposed to be without padding
// this has to be the same padding that we applied in forward
uint unpadded_ugrad_size = num_cols + interaction_ugrad_size;
// this has to be the same padding that we applied in forward
uint padded_ugrad_size = ((unpadded_ugrad_size-1)/8 + 1)*8; //round up to multiple of 8
// input space + upstream grad space
// We copy the whole input plus just the unpadded interaction part of the upstream grad
uint smem_size_elems = input_size + interaction_ugrad_size;
uint smem_size_bytes = smem_size_elems << 2; // F32 Kernel
// we use the fact that padded_ugrad_size is always divisible by 4 - we just made it.
bool float4_predicate = !(num_cols & 3);
if (float4_predicate) {
dotBasedInteractF32BwdKernel<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes, stream>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
} else {
dotBasedInteractF32BwdKernelNonAligned<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes, stream>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
}
}
#endif /* FP32_LAUNCHER_CU */
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | np_box_mask_list_ops_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_mask_list_ops."""
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_box_mask_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.box_mask_list1 = np_box_mask_list.BoxMaskList(
box_data=boxes1, mask_data=masks1)
self.box_mask_list2 = np_box_mask_list.BoxMaskList(
box_data=boxes2, mask_data=masks2)
def test_area(self):
areas = np_box_mask_list_ops.area(self.box_mask_list1)
expected_areas = np.array([8.0, 10.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_mask_list_ops.intersection(self.box_mask_list1,
self.box_mask_list2)
expected_intersection = np.array([[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_mask_list_ops.iou(self.box_mask_list1, self.box_mask_list2)
expected_iou = np.array(
[[1.0, 0.0, 8.0 / 25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
ioa21 = np_box_mask_list_ops.ioa(self.box_mask_list1, self.box_mask_list2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array(
[[4.0, 3.0, 7.0, 6.0], [5.0, 6.0, 10.0, 10.0]], dtype=float)
boxes2 = np.array(
[[3.0, 4.0, 6.0, 8.0], [5.0, 6.0, 10.0, 10.0], [1.0, 1.0, 10.0, 10.0]],
dtype=float)
masks1 = np.array(
[[[0, 1, 0], [1, 1, 0], [0, 0, 0]], [[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
masks2 = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]], [[0, 1, 0], [0, 0, 1], [0, 1, 1]],
[[0, 1, 1], [0, 1, 1], [0, 1, 1]]],
dtype=np.uint8)
self.boxes1 = boxes1
self.boxes2 = boxes2
self.masks1 = masks1
self.masks2 = masks2
def test_with_no_scores_field(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes1, mask_data=self.masks1)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_one(self):
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=self.boxes2, mask_data=self.masks2)
box_mask_list.add_field('scores',
np.array([.9, .75, .6], dtype=float))
max_output_size = 1
iou_threshold = 1. # No NMS
expected_boxes = np.array([[3.0, 4.0, 6.0, 8.0]], dtype=float)
expected_masks = np.array(
[[[0, 1, 0], [1, 1, 1], [0, 0, 0]]], dtype=np.uint8)
nms_box_mask_list = np_box_mask_list_ops.non_max_suppression(
box_mask_list, max_output_size, iou_threshold)
self.assertAllClose(nms_box_mask_list.get(), expected_boxes)
self.assertAllClose(nms_box_mask_list.get_masks(), expected_masks)
def test_multiclass_nms(self):
boxes = np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
mask0 = np.array([[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask1 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
mask2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
dtype=np.uint8)
masks = np.stack([mask0, mask1, mask2])
box_mask_list = np_box_mask_list.BoxMaskList(
box_data=boxes, mask_data=masks)
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
box_mask_list.add_field('scores', scores)
box_mask_list_clean = np_box_mask_list_ops.multi_class_non_max_suppression(
box_mask_list, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = box_mask_list_clean.get_field('scores')
classes_clean = box_mask_list_clean.get_field('classes')
boxes = box_mask_list_clean.get()
masks = box_mask_list_clean.get_masks()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/trtis_client | trtis_client | phrases | Printing, differs from most other exhibits in being comparatively modern.
Once upon a midnight dreary, while I pondered, weak and weary.
The Earth is the third planet from the Sun.
Politically, the world has around two hundred sovereign states.
Outside, the temperature is twenty four point two degrees centigrade.
The time is twelve fifty two in the afternoon.
A yard is an English unit of measure and is equal to three feet.
There are approximately six hundred species of oak trees.
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt | trt | speechSynthesizer | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "speechSynthesizer.h"
#include "utils.h"
#include <algorithm>
#include <stdexcept>
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr int MAX_NUM_MELS_PER_CHAR = 10;
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
int maxMelsFromChars(const int numChars)
{
return numChars * MAX_NUM_MELS_PER_CHAR + 100;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
SpeechSynthesizer::SpeechSynthesizer(
std::shared_ptr<Tacotron2Instance> tacotron,
std::shared_ptr<WaveGlowInstance> waveglow,
std::shared_ptr<DenoiserInstance> denoiser) :
TimedObject("SpeechSynthsizer::infer()"),
mMaxBatchSize(
std::min(tacotron->getMaxBatchSize(), waveglow->getMaxBatchSize())),
mNumMaxMels(maxMelsFromChars(tacotron->getMaximumInputLength())),
mNumSymbols(mMaxBatchSize),
mNumFrames(mMaxBatchSize),
mNumSamples(mMaxBatchSize),
mTacotron(tacotron),
mWaveglow(waveglow),
mDenoiser(denoiser),
mBuffer(
mTacotron->getMaximumInputLength(),
getMelSpacing() * mTacotron->getNumMelChannels(),
getMaxOutputSize(),
mMaxBatchSize)
{
addChild(mTacotron.get());
addChild(mWaveglow.get());
if (mDenoiser)
{
addChild(mDenoiser.get());
}
addChild(&mBuffer);
}
SpeechSynthesizer::SpeechSynthesizer(
std::shared_ptr<Tacotron2Instance> tacotron, std::shared_ptr<WaveGlowInstance> waveglow)
: SpeechSynthesizer(tacotron, waveglow, std::shared_ptr<DenoiserInstance>(nullptr))
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void SpeechSynthesizer::infer(const int batchSize, const int* const inputDevice, const int inputSpacing,
const int* const inputLength, float* const samplesDevice, int* const numSamples, float* const outputMelsDevice,
int* const outputNumMels)
{
startTiming();
if (batchSize > mMaxBatchSize)
{
throw std::runtime_error("Maximum batch size is " + std::to_string(mMaxBatchSize) + ". Cannot run with "
+ std::to_string(batchSize));
}
// determine whether to use internal storage or expose the intermediate
// mel spectrograms to the caller
float* melFramesDevice;
if (outputMelsDevice)
{
melFramesDevice = outputMelsDevice;
}
else
{
melFramesDevice = mBuffer.getMelsOnDevice();
}
int* melLengths;
if (outputNumMels)
{
melLengths = outputNumMels;
}
else
{
melLengths = mNumFrames.data();
}
mTacotron->infer(
batchSize,
inputDevice,
inputSpacing,
inputLength,
mNumMaxMels,
melFramesDevice,
melLengths);
mWaveglow->infer(
batchSize,
melFramesDevice,
mNumMaxMels,
melLengths,
getMaxOutputSize(),
samplesDevice,
numSamples);
if (mDenoiser)
{
mDenoiser->infer(batchSize, samplesDevice, getMaxOutputSize(), numSamples, samplesDevice);
}
stopTiming();
}
void SpeechSynthesizer::inferFromHost(const int batchSize, const int* const inputHost, const int inputSpacing,
const int* const inputLength, float* const samplesHost, int* const numSamples, float* const outputMelsHost,
int* const outputNumMels)
{
if (batchSize > mMaxBatchSize)
{
throw std::runtime_error("Maximum batch size is " + std::to_string(mMaxBatchSize) + ". Cannot run with "
+ std::to_string(batchSize));
}
startTiming();
// copy data to GPU and do any lazy allocation
const size_t inputSize = inputSpacing * batchSize;
mBuffer.copyToDevice(inputHost, inputSize);
stopTiming();
infer(batchSize, mBuffer.getInputOnDevice(), inputSpacing, inputLength, mBuffer.getSamplesOnDevice(), numSamples,
mBuffer.getMelsOnDevice(), outputNumMels);
startTiming();
const size_t melSize = mTacotron->getNumMelChannels() * getMelSpacing() * batchSize;
const size_t outputSize = getMaxOutputSize() * batchSize;
mBuffer.copyFromDevice(outputMelsHost, melSize, samplesHost, outputSize);
stopTiming();
}
void SpeechSynthesizer::inferFromHost(
const int batchSize, const std::vector<int32_t>* const inputHost, std::vector<float>* const outputHost)
{
startTiming();
if (batchSize > mMaxBatchSize)
{
throw std::runtime_error("Maximum batch size is " + std::to_string(mMaxBatchSize) + ". Cannot run with "
+ std::to_string(batchSize));
}
// copy data to GPU and do any lazy allocation
int inputSpacing;
mBuffer.copyToDevice(batchSize, inputHost, inputSpacing);
stopTiming();
// setup input lengths
for (int i = 0; i < batchSize; ++i)
{
mNumSymbols[i] = static_cast<int>(inputHost[i].size());
assert(mNumSymbols[i] <= inputSpacing);
}
infer(batchSize, mBuffer.getInputOnDevice(), inputSpacing, mNumSymbols.data(), mBuffer.getSamplesOnDevice(),
mNumSamples.data());
startTiming();
mBuffer.copyFromDevice(batchSize, outputHost, getMaxOutputSize(), mNumSamples.data());
stopTiming();
}
int SpeechSynthesizer::getMaxBatchSize() const
{
return mMaxBatchSize;
}
int SpeechSynthesizer::getMaxInputSize() const
{
return mTacotron->getMaximumInputLength();
}
int SpeechSynthesizer::getMelSpacing() const
{
return mNumMaxMels;
}
int SpeechSynthesizer::getMaxOutputSize() const
{
return mNumMaxMels * mWaveglow->getNumberOfSamplesPerFrame();
}
} // namespace tts
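// Usage sketch (illustrative; engine construction is omitted and the variable
// names below are assumptions, not part of this library's API):
//
//   std::shared_ptr<tts::Tacotron2Instance> tacotron = /* built from a TensorRT engine */;
//   std::shared_ptr<tts::WaveGlowInstance> waveglow = /* built from a TensorRT engine */;
//   tts::SpeechSynthesizer synth(tacotron, waveglow);  // denoiser overload is optional
//
//   std::vector<std::vector<int32_t>> sequences = /* one symbol sequence per batch item */;
//   std::vector<std::vector<float>> audio(sequences.size());
//   synth.inferFromHost(
//       static_cast<int>(sequences.size()), sequences.data(), audio.data());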
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer | perf_analyzer | perf_config | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the perf_analyzer with CLI.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
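# Minimal usage sketch (hedged; the model name, batch size, and shape below are
# hypothetical values, and perf_analyzer itself is not invoked here):
#
#   config = PerfAnalyzerConfig()
#   config.update_config({
#       "model-name": "my_model",        # mapped to the -m option
#       "batch-size": 8,                 # mapped to the -b option
#       "concurrency-range": "1:4:1",    # emitted as --concurrency-range=1:4:1
#       "shape": "input__0:8,384",       # repeatable: each value becomes its own --shape
#   })
#   cli_args = config.to_cli_string()
#   # e.g. "-m my_model -b 8 --concurrency-range=1:4:1 --shape=input__0:8,384"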
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | losses_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.research.vale.object_detection.losses."""
import math
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import matcher
class WeightedL2LocalizationLossTest(tf.test.TestCase):
def testReturnsCorrectWeightedLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights))
expected_loss = (3 * 5 * 4) / 2.0
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectAnchorwiseLoss(self):
batch_size = 3
num_anchors = 16
code_size = 4
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
expected_loss = np.ones((batch_size, num_anchors)) * 2
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectNanLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.concat([
tf.zeros([batch_size, num_anchors, code_size // 2]),
tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan
], axis=2)
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
ignore_nan_targets=True)
loss = tf.reduce_sum(loss)
expected_loss = (3 * 5 * 4) / 2.0
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectWeightedLossWithLossesMask(self):
batch_size = 4
num_anchors = 10
code_size = 4
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
losses_mask = tf.constant([True, False, True, True], tf.bool)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights, losses_mask=losses_mask))
expected_loss = (3 * 5 * 4) / 2.0
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, expected_loss)
class WeightedSmoothL1LocalizationLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
batch_size = 2
num_anchors = 3
code_size = 4
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0]], tf.float32)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = 7.695
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
batch_size = 3
num_anchors = 3
code_size = 4
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]],
[[3.5, 7., 0, 0],
[0, .4, 0, .9],
[2.2, 2.2, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0],
[4, 3, 0]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
exp_loss = 7.695
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
class WeightedIOULocalizationLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = 2.0
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithNoLabels(self):
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
losses_mask = tf.constant([False], tf.bool)
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
exp_loss = 0.0
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
class WeightedSigmoidClassificationLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = -2 * math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithClassIndices(self):
prediction_tensor = tf.constant([[[-100, 100, -100, 100],
[100, -100, -100, -100],
[100, 0, -100, 100],
[-100, -100, 100, -100]],
[[-100, 0, 100, 100],
[-100, 100, -100, 100],
[100, 100, 100, 100],
[0, 0, -1, 100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 1]],
[[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 0]]], tf.float32)
# Ignores the last class.
class_indices = tf.constant([0, 1, 2], tf.int32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
class_indices=class_indices)
loss = tf.reduce_sum(loss, axis=2)
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss_per_anchor = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss_per_anchor)
exp_loss = -2 * math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllEqual(prediction_tensor.shape.as_list(),
loss_per_anchor.shape.as_list())
self.assertAllEqual(target_tensor.shape.as_list(),
loss_per_anchor.shape.as_list())
self.assertAllClose(loss_output, exp_loss)
def _logit(probability):
return math.log(probability / (1. - probability))
class SigmoidFocalClassificationLossTest(tf.test.TestCase):
def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.97)],
[_logit(0.91)],
[_logit(0.73)],
[_logit(0.27)],
[_logit(0.09)],
[_logit(0.03)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])
def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights))
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAlmostEqual(order_of_ratio, 1.)
def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=1.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(focal_loss[0][3:], [0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][:3] /
focal_loss[0][:3])))
self.assertAllClose(order_of_ratio, [1., 1., 1.])
def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][3:] /
focal_loss[0][3:])))
self.assertAllClose(order_of_ratio, [1., 1.])
def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5, gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(sigmoid_loss, focal_loss * 2)
def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None, gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(sigmoid_loss, focal_loss)
def testExpectedLossWithAlphaOneAndZeroGamma(self):
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0, gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
with self.test_session() as sess:
focal_loss = sess.run(focal_loss)
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor
1.0 * # alpha
8), # positives from 8 anchors
focal_loss)
def testExpectedLossWithAlpha75AndZeroGamma(self):
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
with self.test_session() as sess:
focal_loss = sess.run(focal_loss)
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
def testExpectedLossWithLossesMask(self):
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights,
losses_mask=losses_mask))
with self.test_session() as sess:
focal_loss = sess.run(focal_loss)
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
class WeightedSoftmaxClassificationLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = - 1.5 * math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self):
"""At very high logit_scale, all predictions will be ~0.33."""
# TODO(yonib): Also test logit_scale with anchorwise=False.
logit_scale = 10e16
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss(logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
uniform_distribution_loss = - math.log(.33333333333)
exp_loss = np.matrix([[uniform_distribution_loss] * 4,
[uniform_distribution_loss] * 4])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
exp_loss = - 1.5 * math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
class WeightedSoftmaxClassificationAgainstLogitsLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 1]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = - 1.5 * math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self):
logit_scale = 100.
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
# find softmax of the two prediction types above
softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)]
softmax_pred1 /= sum(softmax_pred1)
softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)]
softmax_pred2 /= sum(softmax_pred2)
# compute the expected cross entropy for perfect matches
exp_entropy1 = sum(
[-x*np.log(x) for x in softmax_pred1])
exp_entropy2 = sum(
[-x*np.log(x) for x in softmax_pred2])
# weighted expected losses
exp_loss = np.matrix(
[[exp_entropy1, exp_entropy1, exp_entropy2*.5, exp_entropy1],
[exp_entropy2, exp_entropy1, exp_entropy1, 0.]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
class BootstrappedSigmoidClassificationLossTest(tf.test.TestCase):
def testReturnsCorrectLossSoftBootstrapping(self):
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='soft')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = -math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossHardBootstrapping(self):
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
exp_loss = -math.log(.5)
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
with self.test_session() as sess:
loss_output = sess.run(loss)
self.assertAllClose(loss_output, exp_loss)
class HardExampleMinerTest(tf.test.TestCase):
def testHardMiningWithSingleLossType(self):
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
# Uses only location loss to select hard examples
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='loc',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
exp_loc_loss = 100 + 3
exp_cls_loss = 0 + 0
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningWithBothLossType(self):
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='both',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
exp_loc_loss = 80 + 0
exp_cls_loss = 50 + 9
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningNMS(self):
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.9, 0.9, 0.99, 0.99],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=2,
iou_threshold=0.5,
loss_type='cls',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
exp_loc_loss = 0 + 90 + 0 + 1
exp_cls_loss = 110 + 10 + 9 + 6
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatio(self):
location_losses = tf.constant([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], tf.float32)
cls_losses = tf.constant([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], tf.float32)
box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], tf.float32)
match_results = tf.constant([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3])
match_list = [matcher.Match(match_results)]
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10]
exp_loc_loss_list = [80 + 2,
80 + 1 + 2,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [100 + 70,
100 + 90 + 70,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip(
max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list):
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=max_negatives_per_positive)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list, match_list)
loss_op.summarize()
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self):
location_losses = tf.constant([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], tf.float32)
cls_losses = tf.constant([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], tf.float32)
box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], tf.float32)
match_results = tf.constant([-1] * 12)
match_list = [matcher.Match(match_results)]
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
min_negatives_per_image_list = [0, 1, 2, 4, 5, 6]
exp_loc_loss_list = [0,
80,
80 + 1,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [0,
100,
100 + 90,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip(
min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list):
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=3,
min_negatives_per_image=min_negatives_per_image)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list, match_list)
with self.test_session() as sess:
loc_loss_output = sess.run(loc_loss)
self.assertAllClose(loc_loss_output, exp_loc_loss)
cls_loss_output = sess.run(cls_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Forecasting/TFT/triton | triton | export_model | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseSaver,
Format,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("export_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TS_TRACE, Format.TS_SCRIPT, Format.ONNX]
def _get_args():
parser = argparse.ArgumentParser(
description="Script for exporting models from supported frameworks.", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input python module", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value:
saver_type = f"{Format.PYT.value}--{Format.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
dataloader_fn = None
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path, dataloader_fn=dataloader_fn, output_type=args.output_type)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value:
saver_type = f"{Format.PYT.value}--{Format.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path, dataloader_fn)
if __name__ == "__main__":
main()
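# Example invocation (hedged sketch; the paths, module names, format identifiers,
# and extra flags below are hypothetical and depend on the loader, saver, and
# dataloader that are selected):
#
#   python triton/export_model.py \
#       --input-path triton/model.py --input-type pyt \
#       --output-path ${SHARED_DIR}/exported_model.onnx --output-type onnx \
#       --dataloader triton/dataloader.py \
#       --ignore-unknown-parameters \
#       <additional arguments required by the chosen loader/saver/dataloader>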
|
TensorFlow/Segmentation/UNet_Medical/examples | examples | unet_TF-AMP_8GPU | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net training with TF-AMP (automatic mixed precision) on 8 GPUs for 6400 iterations with batch size 8. Usage:
# bash unet_TF-AMP_8GPU.sh <path to dataset> <path to results directory>
horovodrun -np 8 python main.py --data_dir $1 --model_dir $2 --log_every 100 --max_steps 6400 --batch_size 8 --exec_mode train_and_evaluate --crossvalidation_idx 0 --augment --xla --amp |
PyTorch/LanguageModeling/BERT/triton/runner | runner | stages | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
class ResultsType:
"""
Results types generated by runner
"""
TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
TRITON_PERFORMANCE_ONLINE = "triton_performance_online"
class Stage:
"""
Stage definition
"""
label: str
commands: List[Command]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
commands: Union[Tuple[str, ...], List[str]],
result_path: Optional[str] = None,
result_type: Optional[str] = None,
):
"""
Args:
commands: List or Tuple of commands provided as raw string
result_path: Path to results file generated by stage
result_type: Type of results generated by stage
"""
if type(commands) not in [tuple, list]:
raise ValueError("""Incorrect type of commands list. Please, provide list of commands as tuple.""")
self.commands = list(map(lambda command: Command(data=command), commands))
self.result_path = result_path
self.result_type = result_type
class ExportStage(Stage):
label = "Export Model"
class ConversionStage(Stage):
label = "Convert Model"
class DeployStage(Stage):
label = "Deploy Model"
class CorrectnessStage(Stage):
label = "Model Correctness Tests"
class TritonPreparePerformanceProfilingDataStage(Stage):
label = "Prepare Triton Profiling Data"
class TritonPerformanceOfflineStage(Stage):
label = "Triton Performance Offline Tests"
class TritonPerformanceOnlineStage(Stage):
label = "Triton Performance Online Tests"
|
PyTorch/SpeechSynthesis/FastPitch/filelists | filelists | ljs_audio_text_val | wavs/LJ016-0288.wav|"Müller, Müller, He's the man," till a diversion was created by the appearance of the gallows, which was received with continuous yells.
wavs/LJ028-0275.wav|At last, in the twentieth month,
wavs/LJ019-0273.wav|which Sir Joshua Jebb told the committee he considered the proper elements of penal discipline.
wavs/LJ021-0145.wav|From those willing to join in establishing this hoped-for period of peace,
wavs/LJ009-0076.wav|We come to the sermon.
wavs/LJ048-0194.wav|during the morning of November twenty-two prior to the motorcade.
wavs/LJ049-0050.wav|Hill had both feet on the car and was climbing aboard to assist President and Mrs. Kennedy.
wavs/LJ022-0023.wav|The overwhelming majority of people in this country know how to sift the wheat from the chaff in what they hear and what they read.
wavs/LJ034-0053.wav|reached the same conclusion as Latona that the prints found on the cartons were those of Lee Harvey Oswald.
wavs/LJ035-0129.wav|and she must have run down the stairs ahead of Oswald and would probably have seen or heard him.
wavs/LJ039-0075.wav|once you know that you must put the crosshairs on the target and that is all that is necessary.
wavs/LJ046-0184.wav|but there is a system for the immediate notification of the Secret Service by the confining institution when a subject is released or escapes.
wavs/LJ003-0111.wav|He was in consequence put out of the protection of their internal law, end quote. Their code was a subject of some curiosity.
wavs/LJ037-0234.wav|Mrs. Mary Brock, the wife of a mechanic who worked at the station, was there at the time and she saw a white male,
wavs/LJ047-0044.wav|Oswald was, however, willing to discuss his contacts with Soviet authorities. He denied having any involvement with Soviet intelligence agencies
wavs/LJ028-0081.wav|Years later, when the archaeologists could readily distinguish the false from the true,
wavs/LJ012-0161.wav|he was reported to have fallen away to a shadow.
wavs/LJ009-0114.wav|Mr. Wakefield winds up his graphic but somewhat sensational account by describing another religious service, which may appropriately be inserted here.
wavs/LJ028-0335.wav|accordingly they committed to him the command of their whole army, and put the keys of their city into his hands.
wavs/LJ005-0014.wav|Speaking on a debate on prison matters, he declared that
wavs/LJ008-0294.wav|nearly indefinitely deferred.
wavs/LJ028-0307.wav|then let twenty days pass, and at the end of that time station near the Chaldasan gates a body of four thousand.
wavs/LJ046-0058.wav|During his Presidency, Franklin D. Roosevelt made almost four hundred journeys and traveled more than three hundred fifty thousand miles.
wavs/LJ046-0146.wav|The criteria in effect prior to November twenty-two, nineteen sixty-three, for determining whether to accept material for the PRS general files
wavs/LJ017-0131.wav|even when the high sheriff had told him there was no possibility of a reprieve, and within a few hours of execution.
wavs/LJ002-0018.wav|The inadequacy of the jail was noticed and reported upon again and again by the grand juries of the city of London,
wavs/LJ019-0257.wav|Here the tread-wheel was in use, there cellular cranks, or hard-labor machines.
wavs/LJ034-0042.wav|that he could only testify with certainty that the print was less than three days old.
wavs/LJ031-0070.wav|Dr. Clark, who most closely observed the head wound,
wavs/LJ012-0035.wav|the number and names on watches, were carefully removed or obliterated after the goods passed out of his hands.
wavs/LJ050-0168.wav|with the particular purposes of the agency involved. The Commission recognizes that this is a controversial area
wavs/LJ036-0103.wav|The police asked him whether he could pick out his passenger from the lineup.
wavs/LJ016-0318.wav|Other officials, great lawyers, governors of prisons, and chaplains supported this view.
wavs/LJ034-0198.wav|Euins, who was on the southwest corner of Elm and Houston Streets testified that he could not describe the man he saw in the window.
wavs/LJ049-0026.wav|On occasion the Secret Service has been permitted to have an agent riding in the passenger compartment with the President.
wavs/LJ011-0096.wav|He married a lady also belonging to the Society of Friends, who brought him a large fortune, which, and his own money, he put into a city firm,
wavs/LJ040-0002.wav|Chapter seven. Lee Harvey Oswald: Background and Possible Motives, Part one.
wavs/LJ014-0030.wav|These were damnatory facts which well supported the prosecution.
wavs/LJ043-0002.wav|The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy. Chapter seven. Lee Harvey Oswald:
wavs/LJ029-0022.wav|The original plan called for the President to spend only one day in the State, making whirlwind visits to Dallas, Fort Worth, San Antonio, and Houston.
wavs/LJ014-0020.wav|He was soon afterwards arrested on suspicion, and a search of his lodgings brought to light several garments saturated with blood;
wavs/LJ040-0027.wav|He was never satisfied with anything.
wavs/LJ028-0093.wav|but his scribe wrote it in the manner customary for the scribes of those days to write of their royal masters.
wavs/LJ004-0152.wav|although at Mr. Buxton's visit a new jail was in process of erection, the first step towards reform since Howard's visitation in seventeen seventy-four.
wavs/LJ008-0111.wav|They entered a "stone cold room," and were presently joined by the prisoner.
wavs/LJ017-0044.wav|and the deepest anxiety was felt that the crime, if crime there had been, should be brought home to its perpetrator.
wavs/LJ033-0047.wav|I noticed when I went out that the light was on, end quote,
wavs/LJ028-0008.wav|you tap gently with your heel upon the shoulder of the dromedary to urge her on.
wavs/LJ016-0179.wav|contracted with sheriffs and conveners to work by the job.
wavs/LJ005-0201.wav|as is shown by the report of the Commissioners to inquire into the state of the municipal corporations in eighteen thirty-five.
wavs/LJ035-0019.wav|drove to the northwest corner of Elm and Houston, and parked approximately ten feet from the traffic signal.
wavs/LJ031-0038.wav|The first physician to see the President at Parkland Hospital was Dr. Charles J. Carrico, a resident in general surgery.
wavs/LJ017-0070.wav|but his sporting operations did not prosper, and he became a needy man, always driven to desperate straits for cash.
wavs/LJ007-0154.wav|These pungent and well-grounded strictures applied with still greater force to the unconvicted prisoner, the man who came to the prison innocent, and still uncontaminated,
wavs/LJ002-0043.wav|long narrow rooms -- one thirty-six feet, six twenty-three feet, and the eighth eighteen,
wavs/LJ004-0096.wav|the fatal consequences whereof might be prevented if the justices of the peace were duly authorized
wavs/LJ018-0081.wav|his defense being that he had intended to commit suicide, but that, on the appearance of this officer who had wronged him,
wavs/LJ042-0129.wav|No night clubs or bowling alleys, no places of recreation except the trade union dances. I have had enough.
wavs/LJ008-0278.wav|or theirs might be one of many, and it might be considered necessary to "make an example."
wavs/LJ015-0203.wav|but were the precautions too minute, the vigilance too close to be eluded or overcome?
wavs/LJ018-0239.wav|His disappearance gave color and substance to evil reports already in circulation that the will and conveyance above referred to
wavs/LJ021-0066.wav|together with a great increase in the payrolls, there has come a substantial rise in the total of industrial profits
wavs/LJ024-0083.wav|This plan of mine is no attack on the Court;
wavs/LJ008-0258.wav|Let me retrace my steps, and speak more in detail of the treatment of the condemned in those bloodthirsty and brutally indifferent days,
wavs/LJ038-0199.wav|eleven. If I am alive and taken prisoner,
wavs/LJ045-0230.wav|when he was finally apprehended in the Texas Theatre. Although it is not fully corroborated by others who were present,
wavs/LJ027-0141.wav|is closely reproduced in the life-history of existing deer. Or, in other words,
wavs/LJ016-0020.wav|He never reached the cistern, but fell back into the yard, injuring his legs severely.
wavs/LJ012-0250.wav|On the seventh July, eighteen thirty-seven,
wavs/LJ001-0110.wav|Even the Caslon type when enlarged shows great shortcomings in this respect:
wavs/LJ047-0148.wav|On October twenty-five,
wavs/LJ031-0134.wav|On one occasion Mrs. Johnson, accompanied by two Secret Service agents, left the room to see Mrs. Kennedy and Mrs. Connally.
wavs/LJ036-0174.wav|This is the approximate time he entered the roominghouse, according to Earlene Roberts, the housekeeper there.
wavs/LJ026-0068.wav|Energy enters the plant, to a small extent,
wavs/LJ034-0160.wav|on Brennan's subsequent certain identification of Lee Harvey Oswald as the man he saw fire the rifle.
wavs/LJ013-0164.wav|who came from his room ready dressed, a suspicious circumstance, as he was always late in the morning.
wavs/LJ014-0263.wav|When other pleasures palled he took a theatre, and posed as a munificent patron of the dramatic art.
wavs/LJ005-0079.wav|and improve the morals of the prisoners, and shall insure the proper measure of punishment to convicted offenders.
wavs/LJ048-0228.wav|and others who were present say that no agent was inebriated or acted improperly.
wavs/LJ027-0052.wav|These principles of homology are essential to a correct interpretation of the facts of morphology.
wavs/LJ004-0045.wav|Mr. Sturges Bourne, Sir James Mackintosh, Sir James Scarlett, and William Wilberforce.
wavs/LJ012-0042.wav|which he kept concealed in a hiding-place with a trap-door just under his bed.
wavs/LJ014-0110.wav|At the first the boxes were impounded, opened, and found to contain many of O'Connor's effects.
wavs/LJ028-0506.wav|A modern artist would have difficulty in doing such accurate work.
wavs/LJ014-0010.wav|yet he could not overcome the strange fascination it had for him, and remained by the side of the corpse till the stretcher came.
wavs/LJ042-0096.wav|(old exchange rate) in addition to his factory salary of approximately equal amount
wavs/LJ031-0202.wav|Mrs. Kennedy chose the hospital in Bethesda for the autopsy because the President had served in the Navy.
wavs/LJ012-0235.wav|While they were in a state of insensibility the murder was committed.
wavs/LJ019-0186.wav|seeing that since the establishment of the Central Criminal Court, Newgate received prisoners for trial from several counties,
wavs/LJ018-0098.wav|and recognized as one of the frequenters of the bogus law-stationers. His arrest led to that of others.
wavs/LJ036-0077.wav|Roger D. Craig, a deputy sheriff of Dallas County,
wavs/LJ045-0140.wav|The arguments he used to justify his use of the alias suggest that Oswald may have come to think that the whole world was becoming involved
wavs/LJ029-0032.wav|According to O'Donnell, quote, we had a motorcade wherever we went, end quote.
wavs/LJ003-0345.wav|All the committee could do in this respect was to throw the responsibility on others.
wavs/LJ008-0307.wav|afterwards express a wish to murder the Recorder for having kept them so long in suspense.
wavs/LJ043-0030.wav|If somebody did that to me, a lousy trick like that, to take my wife away, and all the furniture, I would be mad as hell, too.
wavs/LJ009-0238.wav|After this the sheriffs sent for another rope, but the spectators interfered, and the man was carried back to jail.
wavs/LJ039-0223.wav|Oswald's Marine training in marksmanship, his other rifle experience and his established familiarity with this particular weapon
wavs/LJ014-0076.wav|He was seen afterwards smoking and talking with his hosts in their back parlor, and never seen again alive.
wavs/LJ016-0138.wav|at a distance from the prison.
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers | layers | norm | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import Dict
import torch
import torch.nn as nn
from torch import Tensor
from torch.cuda.nvtx import range as nvtx_range
from se3_transformer.model.fiber import Fiber
@torch.jit.script
def clamped_norm(x, clamp: float):
return x.norm(p=2, dim=-1, keepdim=True).clamp(min=clamp)
@torch.jit.script
def rescale(x, norm, new_norm):
return x / norm * new_norm
class NormSE3(nn.Module):
"""
Norm-based SE(3)-equivariant nonlinearity.
             ┌──> feature_norm ──> LayerNorm() ──> ReLU() ──┐
feature_in ──┤                                              * ──> feature_out
             └──> feature_phase ────────────────────────────┘
"""
NORM_CLAMP = 2 ** -24 # Minimum positive subnormal for FP16
def __init__(self, fiber: Fiber, nonlinearity: nn.Module = nn.ReLU()):
super().__init__()
self.fiber = fiber
self.nonlinearity = nonlinearity
if len(set(fiber.channels)) == 1:
# Fuse all the layer normalizations into a group normalization
self.group_norm = nn.GroupNorm(num_groups=len(fiber.degrees), num_channels=sum(fiber.channels))
else:
# Use multiple layer normalizations
self.layer_norms = nn.ModuleDict({
str(degree): nn.LayerNorm(channels)
for degree, channels in fiber
})
def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:
with nvtx_range('NormSE3'):
output = {}
if hasattr(self, 'group_norm'):
# Compute per-degree norms of features
norms = [clamped_norm(features[str(d)], self.NORM_CLAMP)
for d in self.fiber.degrees]
fused_norms = torch.cat(norms, dim=-2)
# Transform the norms only
new_norms = self.nonlinearity(self.group_norm(fused_norms.squeeze(-1))).unsqueeze(-1)
new_norms = torch.chunk(new_norms, chunks=len(self.fiber.degrees), dim=-2)
# Scale features to the new norms
for norm, new_norm, d in zip(norms, new_norms, self.fiber.degrees):
output[str(d)] = rescale(features[str(d)], norm, new_norm)
else:
for degree, feat in features.items():
norm = clamped_norm(feat, self.NORM_CLAMP)
new_norm = self.nonlinearity(self.layer_norms[degree](norm.squeeze(-1)).unsqueeze(-1))
output[degree] = rescale(feat, norm, new_norm)  # feat / norm * new_norm, mirroring the fused branch
return output
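# Minimal usage sketch (illustrative; the Fiber construction and feature shapes below
# are assumptions based on this module, not a verified example from the repository):
#   fiber = Fiber({0: 32, 1: 32})             # equal channel counts -> fused GroupNorm path
#   norm_layer = NormSE3(fiber)
#   out = norm_layer({'0': feats_deg0, '1': feats_deg1})
# where feats_degd has shape [..., channels, 2*d + 1]. Only the per-feature norms are
# transformed (LayerNorm/GroupNorm followed by ReLU); directions are preserved via
# rescale(x, norm, new_norm) == x / norm * new_norm.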
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph | graph | utils | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
def timed(F, desc):
def inner(*args, **kwargs):
start = time.perf_counter()
res = F(*args, **kwargs)
elapsed = time.perf_counter() - start
print(f'"{desc}" took {elapsed:.2f}s')
return res
return inner
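# Example usage (the function and variable names here are hypothetical):
#   timed_build = timed(build_graph, "build graph")
#   graph = timed_build(edge_list)   # prints: "build graph" took 1.23s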
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc | preproc | run_spark_gpu | #!/bin/bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
# File Name: run_spark_gpu.sh
set -e
# the data path including 1TB criteo data, day_0, day_1, ...
export INPUT_PATH=${1:-'/data/dlrm/criteo'}
# the output path, use for generating the dictionary and the final dataset
# the output folder should have more than 300GB
export OUTPUT_PATH=${2:-'/data/dlrm/spark/output'}
export FREQUENCY_LIMIT=${3:-'15'}
export HARDWARE_PLATFORM=${4:-'DGX2'}
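# Example invocation (the paths below are placeholders for illustration):
#   ./run_spark_gpu.sh /data/dlrm/criteo /data/dlrm/spark/output 15 DGX2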
# spark local dir should have about 3TB
# the temporary path used for spark shuffle write
export SPARK_LOCAL_DIRS='/data/dlrm/spark/tmp'
if [[ $HARDWARE_PLATFORM == DGX2 ]]; then
source dgx2_config.sh
else
echo "Unknown hardware platform ${HARDWARE_PLATFORM}"
exit 1
fi
OPTS="--frequency_limit $FREQUENCY_LIMIT"
export SPARK_HOME=/opt/spark
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH
# we use spark standalone to run the job
export MASTER=spark://$HOSTNAME:7077
echo "Starting spark standalone"
start-master.sh
start-slave.sh $MASTER
echo "Generating the dictionary..."
spark-submit --master $MASTER \
--driver-memory "${DRIVER_MEMORY}G" \
--executor-cores $NUM_EXECUTOR_CORES \
--executor-memory "${EXECUTOR_MEMORY}G" \
--conf spark.cores.max=$TOTAL_CORES \
--conf spark.task.cpus=1 \
--conf spark.sql.files.maxPartitionBytes=1073741824 \
--conf spark.sql.shuffle.partitions=600 \
--conf spark.driver.maxResultSize=2G \
--conf spark.locality.wait=0s \
--conf spark.network.timeout=1800s \
--conf spark.task.resource.gpu.amount=0.01 \
--conf spark.executor.resource.gpu.amount=1 \
--conf spark.plugins=com.nvidia.spark.SQLPlugin \
--conf spark.rapids.sql.concurrentGpuTasks=2 \
--conf spark.rapids.sql.reader.batchSizeRows=4000000 \
--conf spark.rapids.memory.pinnedPool.size=16g \
--conf spark.rapids.sql.explain=ALL \
--conf spark.sql.autoBroadcastJoinThreshold=1GB \
--conf spark.rapids.sql.incompatibleOps.enabled=true \
--conf spark.driver.maxResultSize=2G \
--conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \
spark_data_utils.py --mode generate_models \
$OPTS \
--input_folder $INPUT_PATH \
--days 0-23 \
--model_folder $OUTPUT_PATH/models \
--write_mode overwrite --low_mem 2>&1 | tee submit_dict_log.txt
echo "Transforming the train data from day_0 to day_22..."
spark-submit --master $MASTER \
--driver-memory "${DRIVER_MEMORY}G" \
--executor-cores $NUM_EXECUTOR_CORES \
--executor-memory "${EXECUTOR_MEMORY}G" \
--conf spark.cores.max=$TOTAL_CORES \
--conf spark.task.cpus=3 \
--conf spark.sql.files.maxPartitionBytes=1073741824 \
--conf spark.sql.shuffle.partitions=600 \
--conf spark.driver.maxResultSize=2G \
--conf spark.locality.wait=0s \
--conf spark.network.timeout=1800s \
--conf spark.task.resource.gpu.amount=0.01 \
--conf spark.executor.resource.gpu.amount=1 \
--conf spark.plugins=com.nvidia.spark.SQLPlugin \
--conf spark.rapids.sql.concurrentGpuTasks=2 \
--conf spark.rapids.sql.reader.batchSizeRows=4000000 \
--conf spark.rapids.memory.pinnedPool.size=16g \
--conf spark.rapids.sql.explain=ALL \
--conf spark.sql.autoBroadcastJoinThreshold=1GB \
--conf spark.rapids.sql.incompatibleOps.enabled=true \
--conf spark.driver.maxResultSize=2G \
--conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \
spark_data_utils.py --mode transform \
--input_folder $INPUT_PATH \
--days 0-22 \
--output_folder $OUTPUT_PATH/train \
--model_size_file $OUTPUT_PATH/model_size.json \
--model_folder $OUTPUT_PATH/models \
--write_mode overwrite --low_mem 2>&1 | tee submit_train_log.txt
echo "Splitting the last day into 2 parts of test and validation..."
last_day=$INPUT_PATH/day_23
temp_test=$OUTPUT_PATH/temp/test
temp_validation=$OUTPUT_PATH/temp/validation
mkdir -p $temp_test $temp_validation
lines=`wc -l $last_day | awk '{print $1}'`
former=$((lines / 2))
latter=$((lines - former))
head -n $former $last_day > $temp_test/day_23
tail -n $latter $last_day > $temp_validation/day_23
echo "Transforming the test data in day_23..."
spark-submit --master $MASTER \
--driver-memory "${DRIVER_MEMORY}G" \
--executor-cores $NUM_EXECUTOR_CORES \
--executor-memory "${EXECUTOR_MEMORY}G" \
--conf spark.cores.max=$TOTAL_CORES \
--conf spark.task.cpus=1 \
--conf spark.sql.files.maxPartitionBytes=1073741824 \
--conf spark.sql.shuffle.partitions=30 \
--conf spark.driver.maxResultSize=2G \
--conf spark.locality.wait=0s \
--conf spark.network.timeout=1800s \
--conf spark.task.resource.gpu.amount=0.01 \
--conf spark.executor.resource.gpu.amount=1 \
--conf spark.plugins=com.nvidia.spark.SQLPlugin \
--conf spark.rapids.sql.concurrentGpuTasks=2 \
--conf spark.rapids.sql.reader.batchSizeRows=4000000 \
--conf spark.rapids.memory.pinnedPool.size=16g \
--conf spark.rapids.sql.explain=ALL \
--conf spark.sql.autoBroadcastJoinThreshold=1GB \
--conf spark.rapids.sql.incompatibleOps.enabled=true \
--conf spark.driver.maxResultSize=2G \
--conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \
spark_data_utils.py --mode transform \
--input_folder $temp_test \
--days 23-23 \
--output_folder $OUTPUT_PATH/test \
--output_ordering input \
--model_folder $OUTPUT_PATH/models \
--write_mode overwrite --low_mem 2>&1 | tee submit_test_log.txt
echo "Transforming the validation data in day_23..."
spark-submit --master $MASTER \
--driver-memory "${DRIVER_MEMORY}G" \
--executor-cores $NUM_EXECUTOR_CORES \
--executor-memory "${EXECUTOR_MEMORY}G" \
--conf spark.cores.max=$TOTAL_CORES \
--conf spark.task.cpus=1 \
--conf spark.sql.files.maxPartitionBytes=1073741824 \
--conf spark.sql.shuffle.partitions=30 \
--conf spark.driver.maxResultSize=2G \
--conf spark.locality.wait=0s \
--conf spark.network.timeout=1800s \
--conf spark.task.resource.gpu.amount=0.01 \
--conf spark.executor.resource.gpu.amount=1 \
--conf spark.plugins=com.nvidia.spark.SQLPlugin \
--conf spark.rapids.sql.concurrentGpuTasks=2 \
--conf spark.rapids.sql.reader.batchSizeRows=4000000 \
--conf spark.rapids.memory.pinnedPool.size=16g \
--conf spark.rapids.sql.explain=ALL \
--conf spark.sql.autoBroadcastJoinThreshold=1GB \
--conf spark.rapids.sql.incompatibleOps.enabled=true \
--conf spark.driver.maxResultSize=2G \
--conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \
spark_data_utils.py --mode transform \
--input_folder $temp_validation \
--days 23-23 \
--output_folder $OUTPUT_PATH/validation \
--output_ordering input \
--model_folder $OUTPUT_PATH/models \
--write_mode overwrite --low_mem 2>&1 | tee submit_validation_log.txt
rm -r $temp_test $temp_validation
stop-master.sh
stop-slave.sh
|
PyTorch/SpeechRecognition/Jasper/triton/scripts | scripts | download_triton_librispeech | #!/usr/bin/env bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Downloads the inference subset of the LibriSpeech corpus.
DATA_SET="LibriSpeech"
DATA_ROOT_DIR="/datasets"
DATA_DIR="${DATA_ROOT_DIR}/${DATA_SET}"
if [ ! -d "$DATA_DIR" ]
then
mkdir -p $DATA_DIR
chmod go+rx $DATA_DIR
python utils/download_librispeech.py triton/triton_librispeech.csv $DATA_DIR -e ${DATA_ROOT_DIR}/
else
echo "Directory $DATA_DIR already exists."
fi
|
PyTorch/Classification/GPUNet/triton/runner/maintainer | maintainer | exceptions | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2PrenetPlugin | taco2PrenetPlugin | taco2PrenetLayerPlugin | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_PRENETLAYERPLUGIN_H
#define TT2I_PRENETLAYERPLUGIN_H
#include "taco2PrenetKernel.h"
#include "NvInfer.h"
#include <memory>
#include <string>
#include <vector>
namespace nvinfer1
{
namespace plugin
{
class Taco2PrenetLayerPlugin : public nvinfer1::IPluginV2DynamicExt
{
public:
using value_type = float;
/**
* @brief Get the name of this plugin.
*
* @return The name.
*/
static const char* getName();
/**
* @brief Get the version of this plugin.
*
* @return The version.
*/
static const char* getVersion();
/**
* @brief Create a new Taco2PrenetLayerPlugin plugin from serialized data.
*
* @param data The data.
* @param length The length of the data in bytes.
*
* @return The instantiated plugin.
*/
static Taco2PrenetLayerPlugin deserialize(const void* data, size_t length);
/**
* @brief Create a new Taco2PrenetLayerPlugin plugin.
*
* @param fcWeights1 The weights of the first fully connected layer.
* @param fcWeights2 The weights of the second fully connected layer.
* @param inputLength The input length.
* @param numDimension The number of dimensions.
*/
Taco2PrenetLayerPlugin(
const nvinfer1::Weights& fcWeights1, const nvinfer1::Weights& fcWeights2, int inputLength, int numDimension);
/**
* @brief The move constructor.
*
* @param other The Taco2PrenetLayerPlugin to move.
*/
Taco2PrenetLayerPlugin(Taco2PrenetLayerPlugin&& other);
~Taco2PrenetLayerPlugin();
// disable copying
Taco2PrenetLayerPlugin(const Taco2PrenetLayerPlugin& other) = delete;
Taco2PrenetLayerPlugin& operator=(const Taco2PrenetLayerPlugin& other) = delete;
/**
* @brief Return the data type of the plugin output at the requested index.
*
* @param index The output index.
* @param inputTypes The input data types.
* @param nbInputs The number of inputs.
*
* @return The type of output.
*/
nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override;
/**
* @brief Get the plugin type.
*
* @return The plugin type.
*/
const char* getPluginType() const override;
/**
* @brief Get the plugin version.
*
* @return The plugin version.
*/
const char* getPluginVersion() const override;
/**
* @brief Get the number of outputs.
*
* @return The number of outputs.
*/
int getNbOutputs() const override;
/**
* @brief Get the dimensions of an output tensor.
*
* @param outputIndex The index of the output tensor.
* @param inputs Expressions for dimensions of the input tensors.
* @param nbInputs The number of input tensors.
* @param expBuilder Object for generating new expressions.
*
* @return The resulting dimensions.
*/
nvinfer1::DimsExprs getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, IExprBuilder& expBuilder) override;
/**
* @brief Check if the given plugin format is supported.
*
* @param pos The format position/index in inOut.format[].
* @param inOut The input and output formats.
* @param nbInputs The number of inputs.
* @param nbOutputs The number of outputs.
*
* @return True if it is supported.
*/
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) override;
/**
* @brief Configure this plugin with the given inputs, outputs, and data types.
*
* @param in The input tensor descriptions.
* @param nbInputs The number of inputs.
* @param out The output tensor descriptions.
* @param nbOutputs The number of outputs.
*/
void configurePlugin(
const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs) override;
/**
* @brief Initialize the plugin.
*
* @return 0 if initialization was successful. Non-zero otherwise.
*/
int initialize() override;
/**
* @brief Terminate the plugin (deinitialize).
*/
void terminate() override;
/**
* @brief Get workspace size required by this plugin for up to the given
* configuration.
*
* @param in The input tensor descriptions.
* @param nbInputs The number of inputs.
* @param out The output tensor descriptions.
* @param nbOutputs The number of outputs.
*
* @return The workspace size in bytes.
*/
size_t getWorkspaceSize(
const PluginTensorDesc* in, int nbInputs, const PluginTensorDesc* out, int nbOutputs) const override;
/**
* @brief Set this plugin for execution on the stream.
*
* @param inputDesc The input tensor descriptors.
* @param outputDesc The output tensor descriptors.
* @param inputs The input tensors.
* @param outputs The output tensors.
* @param workspace The allocated workspace.
* @param stream The stream to operate on.
*
* @return 0 if successfully queued, non-zero otherwise.
*/
int enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) override;
/**
* @brief Get the number of bytes occupied by this plugin if serialized.
*
* @return The size in bytes.
*/
size_t getSerializationSize() const override;
/**
* @brief Serialize this plugin.
*
* @param buffer The buffer to write to.
*/
void serialize(void* buffer) const override;
/**
* @brief Destroy this plugin instance.
*/
void destroy() override;
/**
* @brief Clone this plugin instance.
*
* @return The cloned plugin.
*/
IPluginV2DynamicExt* clone() const override;
/**
* @brief Set the namespace of this plugin.
*
* @param pluginNamespace The namespace.
*/
void setPluginNamespace(const char* pluginNamespace) override;
/**
* @brief Get the namespace of this plugin.
*
* @return The namespace.
*/
const char* getPluginNamespace() const override;
private:
int mInputLength;
int mNumDimension;
std::vector<value_type> mWeights1Host;
std::vector<value_type> mWeights2Host;
std::unique_ptr<Taco2PrenetKernel> mKernel;
std::string mNamespace;
};
} // namespace plugin
} // namespace nvinfer1
#endif
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/training/TF32 | TF32 | train_benchmark_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b0_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 3 \
--save_checkpoint_freq 5 \
--train_batch_size 512 \
--eval_batch_size 512 \
--augmenter_name autoaugment \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
PyTorch/Recommendation/NCF | NCF | convert_test | # Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from load import implicit_load
from convert import save_feature_spec, _TestNegSampler, TEST_0, TEST_1, TRAIN_0, TRAIN_1
import torch
import os
USER_COLUMN = 'user_id'
ITEM_COLUMN = 'item_id'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--path', type=str, default='/data/ml-20m/ratings.csv',
help='Path to reviews CSV file from MovieLens')
parser.add_argument('--output', type=str, default='/data',
help='Output directory for train and test files')
parser.add_argument('--valid_negative', type=int, default=100,
help='Number of negative samples for each positive test example')
parser.add_argument('--seed', '-s', type=int, default=1,
help='Manually set random seed for torch')
parser.add_argument('--test', type=str, help='select modification to be applied to the set')
return parser.parse_args()
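# Example invocation (assuming this file is saved as convert_test.py; paths are placeholders):
#   python convert_test.py --path /data/ml-20m/ratings.csv --output /data \
#       --valid_negative 100 --test less_user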
def main():
args = parse_args()
if args.seed is not None:
torch.manual_seed(args.seed)
print("Loading raw data from {}".format(args.path))
df = implicit_load(args.path, sort=False)
if args.test == 'less_user':
to_drop = set(list(df[USER_COLUMN].unique())[-100:])
df = df[~df[USER_COLUMN].isin(to_drop)]
if args.test == 'less_item':
to_drop = set(list(df[ITEM_COLUMN].unique())[-100:])
df = df[~df[ITEM_COLUMN].isin(to_drop)]
if args.test == 'more_user':
sample = df.sample(frac=0.2).copy()
sample[USER_COLUMN] = sample[USER_COLUMN] + 10000000
df = df.append(sample)
users = df[USER_COLUMN]
df = df[users.isin(users[users.duplicated(keep=False)])] # make sure something remains in the train set
if args.test == 'more_item':
sample = df.sample(frac=0.2).copy()
sample[ITEM_COLUMN] = sample[ITEM_COLUMN] + 10000000
df = df.append(sample)
print("Mapping original user and item IDs to new sequential IDs")
df[USER_COLUMN] = pd.factorize(df[USER_COLUMN])[0]
df[ITEM_COLUMN] = pd.factorize(df[ITEM_COLUMN])[0]
user_cardinality = df[USER_COLUMN].max() + 1
item_cardinality = df[ITEM_COLUMN].max() + 1
# Need to sort before popping to get last item
df.sort_values(by='timestamp', inplace=True)
# clean up data
del df['rating'], df['timestamp']
df = df.drop_duplicates() # assuming it keeps order
# Test set is the last interaction for a given user
grouped_sorted = df.groupby(USER_COLUMN, group_keys=False)
test_data = grouped_sorted.tail(1).sort_values(by=USER_COLUMN)
# Train set is all interactions but the last one
train_data = grouped_sorted.apply(lambda x: x.iloc[:-1])
sampler = _TestNegSampler(train_data.values, args.valid_negative)
test_negs = sampler.generate().cuda()
if args.valid_negative > 0:
test_negs = test_negs.reshape(-1, args.valid_negative)
else:
test_negs = test_negs.reshape(test_data.shape[0], 0)
if args.test == 'more_pos':
mask = np.random.rand(len(test_data)) < 0.5
sample = test_data[mask].copy()
sample[ITEM_COLUMN] = sample[ITEM_COLUMN] + 5
test_data = test_data.append(sample)
test_negs_copy = test_negs[mask]
test_negs = torch.cat((test_negs, test_negs_copy), dim=0)
if args.test == 'less_pos':
mask = np.random.rand(len(test_data)) < 0.5
test_data = test_data[mask]
test_negs = test_negs[mask]
# Reshape train set into user,item,label tabular and save
train_ratings = torch.from_numpy(train_data.values).cuda()
train_labels = torch.ones_like(train_ratings[:, 0:1], dtype=torch.float32)
torch.save(train_ratings, os.path.join(args.output, TRAIN_0))
torch.save(train_labels, os.path.join(args.output, TRAIN_1))
# Reshape test set into user,item,label tabular and save
# All users have the same number of items, items for a given user appear consecutively
test_ratings = torch.from_numpy(test_data.values).cuda()
test_users_pos = test_ratings[:, 0:1] # slicing instead of indexing to keep dimensions
test_items_pos = test_ratings[:, 1:2]
test_users = test_users_pos.repeat_interleave(args.valid_negative + 1, dim=0)
test_items = torch.cat((test_items_pos.reshape(-1, 1), test_negs), dim=1).reshape(-1, 1)
positive_labels = torch.ones_like(test_users_pos, dtype=torch.float32)
negative_labels = torch.zeros_like(test_users_pos, dtype=torch.float32).repeat(1, args.valid_negative)
test_labels = torch.cat((positive_labels, negative_labels), dim=1).reshape(-1, 1)
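# Shape note: with valid_negative = 100, each test user contributes 101 rows
# (1 positive followed by 100 negatives), so test_users, test_items and
# test_labels all have shape [num_test_users * 101, 1].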
dtypes = {'user': str(test_users.dtype), 'item': str(test_items.dtype), 'label': str(test_labels.dtype)}
test_tensor = torch.cat((test_users, test_items), dim=1)
torch.save(test_tensor, os.path.join(args.output, TEST_0))
torch.save(test_labels, os.path.join(args.output, TEST_1))
if args.test == 'other_names':
dtypes = {'user_2': str(test_users.dtype),
'item_2': str(test_items.dtype),
'label_2': str(test_labels.dtype)}
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml',
user_feature_name='user_2',
item_feature_name='item_2',
label_feature_name='label_2')
else:
save_feature_spec(user_cardinality=user_cardinality, item_cardinality=item_cardinality, dtypes=dtypes,
test_negative_samples=args.valid_negative, output_path=args.output + '/feature_spec.yaml')
if __name__ == '__main__':
main()
|
PyTorch/Detection/Efficientdet/waymo_tool | waymo_tool | waymo_data_converter | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import argparse
import glob
import tensorflow as tf
import math
import numpy as np
import itertools
import ipdb
import os
import h5py
import cv2
import sys
import json
import matplotlib.pyplot as plt
from collections import Counter
from google.cloud import storage
tf.compat.v1.enable_eager_execution()
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
args = None
def hash(m, n, t):
return int(int(m)*10000000 + int(n)*100 + int(t))
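# Example: hash(3, 12, 2) == 3*10000000 + 12*100 + 2 == 30001202, packing
# (segment id, frame id, camera/image id) into a single COCO image id.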
def parse_args():
global args, seg_id
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["training", "validation"], default="validation")
parser.add_argument("--tf-dir", default="/workspace/data/waymo_tfrecords_val")
parser.add_argument("--out-dir", default="/workspace/data/waymo_coco_format_val")
parser.add_argument("--seg-min", default=0, type=int)
parser.add_argument("--seg-max", default=1, type=int)
parser.add_argument("--log-file", default="waymo-converter")
args = parser.parse_args()
# set starting seg id
seg_id = args.seg_min
return args
def setup_logging(args):
logging.basicConfig(filename="/results/{}.log".format(args.log_file),
# filemode="w",
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=logging.DEBUG)
logging.info('Logging setup done!')
def create_dirs(args):
# create intermediate and out directories
os.makedirs(args.tf_dir, exist_ok=True)
os.makedirs(args.out_dir, exist_ok=True)
args.images_dir = os.path.join(args.out_dir, "images")
args.annotations_dir = os.path.join(args.out_dir, "annotations")
os.makedirs(args.images_dir, exist_ok=True)
os.makedirs(args.annotations_dir, exist_ok=True)
logging.info("Created images and annotations directories: {} {}".format(
args.images_dir, args.annotations_dir))
# set global frame and annotations id
seg_id = 0
frame_id = 0
annotation_id = 0
images_content = []
annotations_content = []
info = {
u'description': u'COCO 2014 Dataset',
u'url': u'http://cocodataset.org',
u'version': u'1.0',
u'year': 2014,
u'contributor': u'COCO Consortium',
u'date_created': u'2017/09/01'
}
licenses = [{
u'url': u'http://creativecommons.org/licenses/by-nc-sa/2.0/',
u'id': 1,
u'name': u'Attribution-NonCommercial-ShareAlike License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nc/2.0/',
u'id': 2,
u'name': u'Attribution-NonCommercial License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nc-nd/2.0/',
u'id': 3,
u'name': u'Attribution-NonCommercial-NoDerivs License'
}, {
u'url': u'http://creativecommons.org/licenses/by/2.0/',
u'id': 4,
u'name': u'Attribution License'
}, {
u'url': u'http://creativecommons.org/licenses/by-sa/2.0/',
u'id': 5,
u'name': u'Attribution-ShareAlike License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nd/2.0/',
u'id': 6,
u'name': u'Attribution-NoDerivs License'
}, {
u'url': u'http://flickr.com/commons/usage/',
u'id': 7,
u'name': u'No known copyright restrictions'
}, {
u'url': u'http://www.usa.gov/copyright.shtml',
u'id': 8,
u'name': u'United States Government Work'
}]
#dataset-specific
category = [{
u'supercategory': u'object',
u'id': 1,
u'name': u'vehicle'
}, {
u'supercategory': u'object',
u'id': 2,
u'name': u'pedestrian'
}, {
u'supercategory': u'object',
u'id': 3,
u'name': u'cyclist'
}]
# Function to convert Waymo TFrecord to COCO format
def convert(tfrecord):
global frame_id, seg_id, annotation_id, images_content, annotations_content
try:
dataset = tf.data.TFRecordDataset(tfrecord, compression_type='')
num_frames = 0
images = []
annotations = []
all_labels = []
# try:
for data in dataset:
frame_id += 1
num_frames += 1
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
image_id = 1
# iterate across images in frame - front, side, etc.,
for index, camera_image in enumerate(frame.images):
output_image = tf.image.decode_jpeg(camera_image.image).numpy()
# iterate across labels in frame - front, side, etc.,
for camera_labels in frame.camera_labels:
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name != camera_image.name:
continue
for image_labels in camera_labels.labels:
# Waymo label type 3 (sign) is not used here, so remap cyclist (type 4) to category id 3
if image_labels.type == 4:
image_labels.type = 3
annotations.append({
"image_id":
hash(seg_id, frame_id, image_id),
"area":
image_labels.box.width * image_labels.box.length,
"bbox": [
image_labels.box.center_x -
image_labels.box.length / 2.,
image_labels.box.center_y -
image_labels.box.width / 2.,
image_labels.box.length, image_labels.box.width
],
"category_id":
image_labels.type,
"iscrowd":
0,
"id":
annotation_id
})
all_labels.append(image_labels.type)
annotation_id += 1
h, w, c = output_image.shape
plt.imsave("{}/{}_{}_{}.jpg".format(args.images_dir, seg_id, frame_id, image_id),
output_image,
cmap=None)
images.append({
u'license': 1,
u'file_name': "{}_{}_{}.jpg".format(seg_id, frame_id, image_id),
u'waymo_url': None,
u'height': h,
u'width': w,
u'date_captured': u'2013-11-14 16:28:13',
u'flickr_url': None,
u'id': hash(seg_id, frame_id, image_id)
})
image_id += 1
logging.info("Converted {} frames in {}".format(num_frames, tfrecord))
images_content += images
annotations_content += annotations
logging.info("# images: {} # annotations: {}".format(
len(images), len(annotations)))
logging.info("# Label spread: {}".format(Counter(all_labels)))
except:
logging.info("Corrupted record {}".format(tfrecord))
# combine annotations, images data per segment into one annotations.json file
def combine():
global images_content, annotations_content
all_data = {
"info": info,
"images": images_content,
"licenses": licenses,
"annotations": annotations_content,
"categories": category
}
with open("{}/annotations-{}-{}.json".format(args.annotations_dir, args.seg_min, args.seg_max), 'w') as outfile:
json.dump(all_data, outfile)
# download waymo data
def download_and_convert(args):
global seg_id, frame_id
if args.dataset == "training":
num_segs = 32
if args.dataset == "validation":
num_segs = 8
logging.info("Number of segments in dataset: {}".format(num_segs))
logging.info("Segments to process: {} to {}".format(args.seg_min, args.seg_max))
logging.info("Creating google storage client to access waymo bucket")
storage_client = storage.Client(project=None)
bucket_name = "waymo_open_dataset_v_1_2_0"
bucket = storage_client.bucket(bucket_name)
while seg_id < args.seg_max:
# copy from bucket
frame_id = 0
source_blob_name = '{dataset}/{dataset}_{:04}.tar'.format(
seg_id, dataset=args.dataset)
try:
blob = bucket.blob(source_blob_name)
blob.download_to_filename(os.path.join(args.tf_dir, "{}_{:04}.tar".format(args.dataset, seg_id)))
except AssertionError as err:
logging.exception(
"Failed to download segment {}. Make sure GOOGLE_APPLICATION_CREDENTIALS is set and you have access to gs://waymo_open_dataset_v_1_2_0"
.format(seg_id))
sys.exit()
logging.info("Extracting tfrecords from segment: {}_{:04}".format(args.dataset, seg_id))
os.system("cd {}; tar -xvf {}_{:04}.tar".format(args.tf_dir, args.dataset, seg_id))
tfrecords = glob.glob("{}/*.tfrecord".format(args.tf_dir))
# extract data from each record
for record_id, record in enumerate(tfrecords):
if "with_camera_labels" in record:
logging.info("Processing record # {}: {}".format(record_id, record))
convert(record)
else:
logging.info("Skipping record # {}: {}".format(record_id, record))
logging.info("Deleting record # {}: {}...".format(record_id, record))
os.remove(record)
logging.info("Processed {} records".format(len(tfrecords)))
os.remove("{}/{}_{:04}.tar".format(args.tf_dir, args.dataset, seg_id))
os.remove("{}/LICENSE".format(args.tf_dir))
seg_id += 1
# write annotations.json
combine()
if __name__ == "__main__":
# trigger download and conversion of Waymo data
print("Usage: python waymo_data_converter.py --dataset <validation/training> --tf-dir <empty scratch pad dir> --out-dir <empty coco format output dir> --seg-min <0 or any starting seg id> --seg-max <32 - train, 8 - validation or any ending seg id> --log-file <name of log file which will be written to /results>")
args = parse_args()
setup_logging(args)
create_dirs(args)
logging.info("Running on dataset: {} \ntf records dir: {} \ncoco format out dir: {}".format(args.dataset, args.tf_dir, args.out_dir))
download_and_convert(args)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model_dataset | model_dataset | xgboost_electricity | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dataset:
config:
lag_features:
- name: power_usage
min_value: 1
max_value: 96
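# The block above is read as: derive lagged copies of power_usage for lags 1 through 96
# (interpretation based on the field names; exact semantics depend on the TSPP dataset config).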
model:
config:
max_depth: 14
learning_rate: 0.017
subsample: 0.8
colsample_bytree: 1.0
colsample_bylevel: 0.4
gamma: 0.3
n_rounds: 250
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | eval_util | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
}
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
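# Sketch of how this dict is typically used (constructor arguments shown are assumptions):
#   evaluator_cls = EVAL_METRICS_CLASS_DICT['coco_detection_metrics']
#   evaluator = evaluator_cls(categories)   # `categories` as produced by label_map_util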
def write_metrics(metrics, global_step, summary_dir):
"""Write metrics to a summary directory.
Args:
metrics: A dictionary containing metric names and values.
global_step: Global step at which the metrics are computed.
summary_dir: Directory to write tensorflow summaries to.
"""
tf.logging.info('Writing metrics to tf summary.')
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
for key in sorted(metrics):
summary = tf.Summary(value=[
tf.Summary.Value(tag=key, simple_value=metrics[key]),
])
summary_writer.add_summary(summary, global_step)
tf.logging.info('%s: %f', key, metrics[key])
tf.logging.info('Metrics written to tf summary.')
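# Example call with illustrative values:
#   write_metrics({'DetectionBoxes_Precision/mAP': 0.25}, global_step=10000,
#                 summary_dir='/tmp/eval')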
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
tag,
global_step,
categories,
summary_dir='',
export_dir='',
agnostic_mode=False,
show_groundtruth=False,
groundtruth_box_visualization_color='black',
min_score_thresh=.5,
max_num_predictions=20,
skip_scores=False,
skip_labels=False,
keep_image_id_for_visualization_export=False):
"""Visualizes detection results and writes visualizations to image summaries.
This function visualizes an image with its detected bounding boxes and writes
to image summaries which can be viewed on tensorboard. It optionally also
writes images to a directory. In the case of missing entry in the label map,
unknown class name in the visualization is shown as "N/A".
Args:
result_dict: a dictionary holding groundtruth and detection
data corresponding to each image being evaluated. The following keys
are required:
'original_image': a numpy array representing the image with shape
[1, height, width, 3] or [1, height, width, 1]
'detection_boxes': a numpy array of shape [N, 4]
'detection_scores': a numpy array of shape [N]
'detection_classes': a numpy array of shape [N]
The following keys are optional:
'groundtruth_boxes': a numpy array of shape [N, 4]
'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
Detections are assumed to be provided in decreasing order of score; for
display, scores are assumed to be probabilities between 0 and 1.
tag: tensorboard tag (string) to associate with image.
global_step: global step at which the visualization are generated.
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
summary_dir: the output directory to which the image summaries are written.
export_dir: the output directory to which images are written. If this is
empty (default), then images are not exported.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not.
show_groundtruth: boolean (default: False) controlling whether to show
groundtruth boxes in addition to detected boxes
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
min_score_thresh: minimum score threshold for a box to be visualized
max_num_predictions: maximum number of detections to visualize
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
keep_image_id_for_visualization_export: whether to keep image identifier in
filename when exported to export_dir
Raises:
ValueError: if result_dict does not contain the expected keys (i.e.,
'original_image', 'detection_boxes', 'detection_scores',
'detection_classes')
"""
detection_fields = fields.DetectionResultFields
input_fields = fields.InputDataFields
if not set([
input_fields.original_image,
detection_fields.detection_boxes,
detection_fields.detection_scores,
detection_fields.detection_classes,
]).issubset(set(result_dict.keys())):
raise ValueError('result_dict does not contain all expected keys.')
if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
raise ValueError('If show_groundtruth is enabled, result_dict must contain '
'groundtruth_boxes.')
tf.logging.info('Creating detection visualizations.')
category_index = label_map_util.create_category_index(categories)
image = np.squeeze(result_dict[input_fields.original_image], axis=0)
if image.shape[2] == 1: # If one channel image, repeat in RGB.
image = np.tile(image, [1, 1, 3])
detection_boxes = result_dict[detection_fields.detection_boxes]
detection_scores = result_dict[detection_fields.detection_scores]
detection_classes = np.int32((result_dict[
detection_fields.detection_classes]))
detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
detection_masks = result_dict.get(detection_fields.detection_masks)
detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
# Plot groundtruth underneath detections
if show_groundtruth:
groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
vis_utils.visualize_boxes_and_labels_on_image_array(
image=image,
boxes=groundtruth_boxes,
classes=None,
scores=None,
category_index=category_index,
keypoints=groundtruth_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=None,
groundtruth_box_visualization_color=groundtruth_box_visualization_color)
vis_utils.visualize_boxes_and_labels_on_image_array(
image,
detection_boxes,
detection_classes,
detection_scores,
category_index,
instance_masks=detection_masks,
instance_boundaries=detection_boundaries,
keypoints=detection_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=max_num_predictions,
min_score_thresh=min_score_thresh,
agnostic_mode=agnostic_mode,
skip_scores=skip_scores,
skip_labels=skip_labels)
if export_dir:
if keep_image_id_for_visualization_export and result_dict[fields.InputDataFields().key]:
export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
tag, result_dict[fields.InputDataFields().key]))
else:
export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
vis_utils.save_image_array_as_png(image, export_path)
summary = tf.Summary(value=[
tf.Summary.Value(
tag=tag,
image=tf.Summary.Image(
encoded_image_string=vis_utils.encode_image_array_as_png_str(
image)))
])
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
summary_writer.add_summary(summary, global_step)
tf.logging.info('Detection visualizations written to summary with tag %s.',
tag)
def _run_checkpoint_once(tensor_dict,
evaluators=None,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None):
"""Evaluates metrics defined in evaluators and returns summaries.
This function loads the latest checkpoint in checkpoint_dirs and evaluates
all metrics defined in evaluators. The metrics are processed in batch by the
batch_processor.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
evaluators: a list of objects of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking four arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
3. batch_index: an integer representing the index of the batch amongst
all batches
4. counters: a dictionary of counters (e.g. 'success' and 'skipped')
that the processor updates in place as batches are processed.
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
To skip an image, it suffices to return an empty dictionary in place of
result_dict.
checkpoint_dirs: list of directories to load into an EnsembleModel. If it
has only one directory, EnsembleModel will not be used --
a DetectionModel
will be instantiated directly. Not used if restore_fn is set.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: None, or a function that takes a tf.Session object and correctly
restores all necessary variables from the correct checkpoint file. If
None, attempts to restore from the first directory in checkpoint_dirs.
num_batches: the number of batches to use for evaluation.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
Returns:
global_step: the count of global steps.
all_evaluator_metrics: A dictionary containing metric names and values.
Raises:
ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
one element.
ValueError: if save_graph is True and save_graph_dir is not defined.
"""
if save_graph and not save_graph_dir:
raise ValueError('`save_graph_dir` must be defined.')
sess = tf.Session(master, graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
if restore_fn:
restore_fn(sess)
else:
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint_file)
if save_graph:
tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
counters = {'skipped': 0, 'success': 0}
aggregate_result_losses_dict = collections.defaultdict(list)
with tf.contrib.slim.queues.QueueRunners(sess):
try:
for batch in range(int(num_batches)):
if (batch + 1) % 100 == 0:
tf.logging.info('Running eval ops batch %d/%d', batch + 1,
num_batches)
if not batch_processor:
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict,
losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
result_dict = {}
else:
result_dict, result_losses_dict = batch_processor(
tensor_dict, sess, batch, counters, losses_dict=losses_dict)
if not result_dict:
continue
for key, value in iter(result_losses_dict.items()):
aggregate_result_losses_dict[key].append(value)
for evaluator in evaluators:
# TODO(b/65130867): Use image_id tensor once we fix the input data
# decoders to return correct image_id.
# TODO(akuznetsa): result_dict contains batches of images, while
# add_single_ground_truth_image_info expects a single image. Fix
if (isinstance(result_dict, dict) and
fields.InputDataFields.key in result_dict and
result_dict[fields.InputDataFields.key]):
image_id = result_dict[fields.InputDataFields.key]
else:
image_id = batch
evaluator.add_single_ground_truth_image_info(
image_id=image_id, groundtruth_dict=result_dict)
evaluator.add_single_detected_image_info(
image_id=image_id, detections_dict=result_dict)
tf.logging.info('Running eval batches done.')
except tf.errors.OutOfRangeError:
tf.logging.info('Done evaluating -- epoch limit reached')
finally:
# When done, ask the threads to stop.
tf.logging.info('# success: %d', counters['success'])
tf.logging.info('# skipped: %d', counters['skipped'])
all_evaluator_metrics = {}
  if eval_export_path:
for evaluator in evaluators:
if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
tf.logging.info('Started dumping to json file.')
evaluator.dump_detections_to_json_file(
json_output_path=eval_export_path)
tf.logging.info('Finished dumping to json file.')
for evaluator in evaluators:
metrics = evaluator.evaluate()
evaluator.clear()
if any(key in all_evaluator_metrics for key in metrics):
raise ValueError('Metric names between evaluators must not collide.')
all_evaluator_metrics.update(metrics)
global_step = tf.train.global_step(sess, tf.train.get_global_step())
for key, value in iter(aggregate_result_losses_dict.items()):
all_evaluator_metrics['Losses/' + key] = np.mean(value)
sess.close()
return (global_step, all_evaluator_metrics)
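# Illustrative sketch, not part of the original module: one way to satisfy the
# `batch_processor` contract used by `_run_checkpoint_once` above, i.e. the
# positional signature (tensor_dict, sess, batch_index, counters) plus an
# optional `losses_dict` keyword, returning (result_dict, result_losses_dict).
# The name `_example_batch_processor` is hypothetical.
def _example_batch_processor(tensor_dict, sess, batch_index, counters,
                             losses_dict=None):
  del batch_index  # Unused in this minimal example.
  try:
    result_dict, result_losses_dict = sess.run(
        [tensor_dict, losses_dict or {}])
    counters['success'] += 1
    return result_dict, result_losses_dict
  except tf.errors.InvalidArgumentError:
    counters['skipped'] += 1
    return {}, {}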
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
summary_dir,
evaluators,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
eval_interval_secs=120,
max_number_of_evaluations=None,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None):
"""Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.
This function repeatedly loads a checkpoint and evaluates a desired
set of tensors (provided by tensor_dict) and hands the resulting numpy
arrays to a function result_processor which can be used to further
process/save/visualize the results.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
summary_dir: a directory to write metrics summaries.
    evaluators: a list of objects of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
    batch_processor: a function taking four arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary keeping track of 'success' and 'skipped'
        image counts.
By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
checkpoint_dirs: list of directories to load into a DetectionModel or an
EnsembleModel if restore_fn isn't set. Also used to determine when to run
next evaluation. Must have at least one element.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: a function that takes a tf.Session object and correctly restores
all necessary variables from the correct checkpoint file.
num_batches: the number of batches to use for evaluation.
eval_interval_secs: the number of seconds between each evaluation run.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as None the evaluation continues indefinitely.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
Returns:
metrics: A dictionary containing metric names and values in the latest
evaluation.
Raises:
    ValueError: if max_number_of_evaluations is neither None nor a positive
      number.
ValueError: if checkpoint_dirs doesn't have at least one element.
"""
if max_number_of_evaluations and max_number_of_evaluations <= 0:
raise ValueError(
        '`max_number_of_evaluations` must be either None or a positive '
        'number.')
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
last_evaluated_model_path = None
number_of_evaluations = 0
while True:
start = time.time()
tf.logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
if not model_path:
tf.logging.info('No model found in %s. Will try again in %d seconds',
checkpoint_dirs[0], eval_interval_secs)
elif model_path == last_evaluated_model_path:
tf.logging.info('Found already evaluated checkpoint. Will try again in '
'%d seconds', eval_interval_secs)
else:
last_evaluated_model_path = model_path
global_step, metrics = _run_checkpoint_once(
tensor_dict,
evaluators,
batch_processor,
checkpoint_dirs,
variables_to_restore,
restore_fn,
num_batches,
master,
save_graph,
save_graph_dir,
losses_dict=losses_dict,
eval_export_path=eval_export_path)
write_metrics(metrics, global_step, summary_dir)
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
tf.logging.info('Finished evaluation!')
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
return metrics
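# Illustrative wiring sketch, not part of the original module: how the
# continuous evaluation loop above is typically driven. The directory paths
# below are placeholders, `get_evaluators` is defined later in this file, and
# the name `_example_continuous_eval` is hypothetical.
def _example_continuous_eval(tensor_dict, eval_config, categories):
  evaluators = get_evaluators(eval_config, categories)
  return repeated_checkpoint_run(
      tensor_dict=tensor_dict,
      summary_dir='/tmp/eval_summaries',  # placeholder path
      evaluators=evaluators,
      checkpoint_dirs=['/tmp/train_checkpoints'],  # placeholder path
      num_batches=100,
      eval_interval_secs=300,
      max_number_of_evaluations=1)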
def _scale_box_to_absolute(args):
boxes, image_shape = args
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(args):
detection_boxes, detection_masks, image_shape = args
detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_shape[0], image_shape[1])
return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
def _resize_groundtruth_masks(args):
mask, image_shape = args
mask = tf.expand_dims(mask, 3)
mask = tf.image.resize_images(
mask,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(mask, 3), tf.uint8)
def _scale_keypoint_to_absolute(args):
keypoints, image_shape = args
return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
key,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
image: A single 4D uint8 image tensor of shape [1, H, W, C].
key: A single string tensor identifying the image.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
Returns:
A dictionary with:
'original_image': A [1, H, W, C] uint8 image tensor.
'key': A string tensor with image identifier.
'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [max_detections] float32 tensor of scores.
'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
    'detection_masks': [max_detections, H, W] uint8 tensor of binarized
masks, reframed to full image masks.
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
(Optional)
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
"""
if groundtruth:
max_gt_boxes = tf.shape(
groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
for gt_key in groundtruth:
# expand groundtruth dict along the batch dimension.
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
for detection_key in detections:
detections[detection_key] = tf.expand_dims(
detections[detection_key][0], axis=0)
batched_output_dict = result_dict_for_batched_example(
image,
tf.expand_dims(key, 0),
detections,
groundtruth,
class_agnostic,
scale_to_absolute,
max_gt_boxes=max_gt_boxes)
exclude_keys = [
fields.InputDataFields.original_image,
fields.DetectionResultFields.num_detections,
fields.InputDataFields.num_groundtruth_boxes
]
output_dict = {
fields.InputDataFields.original_image:
batched_output_dict[fields.InputDataFields.original_image]
}
for key in batched_output_dict:
# remove the batch dimension.
if key not in exclude_keys:
output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
return output_dict
def result_dict_for_batched_example(images,
keys,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False,
original_image_spatial_shapes=None,
true_image_shapes=None,
max_gt_boxes=None):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
    images: A 4D uint8 image tensor of shape [batch_size, H, W, C].
keys: A [batch_size] string tensor with image identifier.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
of boxes, in normalized coordinates.
'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
1-indexed classes.
'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
bbox area. (Optional)
'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance
masks (Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
used to resize the image. When set to None, the image size is retained.
true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
containing the size of the unpadded original_image.
max_gt_boxes: [batch_size] tensor representing the maximum number of
groundtruth boxes to pad.
Returns:
A dictionary with:
'original_image': A [batch_size, H, W, C] uint8 image tensor.
'original_image_spatial_shape': A [batch_size, 2] tensor containing the
original image sizes.
'true_image_shape': A [batch_size, 3] tensor containing the size of
the unpadded original_image.
'key': A [batch_size] string tensor with image identifier.
'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
in normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [batch_size, max_detections] float32 tensor of scores.
'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
classes.
    'detection_masks': [batch_size, max_detections, H, W] uint8 tensor of
binarized masks, reframed to full image masks.
'num_detections': [batch_size] int64 tensor containing number of valid
detections.
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes. (Optional)
'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
area. (Optional)
'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance masks
(Optional).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
of groundtruth boxes per image.
Raises:
    ValueError: if original_image_spatial_shapes is not a 2D int32 tensor of
      shape [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D int32 tensor of shape
      [batch_size, 3].
"""
label_id_offset = 1 # Applying label id offset (b/63711816)
input_data_fields = fields.InputDataFields
if original_image_spatial_shapes is None:
original_image_spatial_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:3], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
raise ValueError(
'`original_image_spatial_shape` should be a 2D tensor of shape '
'[batch_size, 2].')
if true_image_shapes is None:
true_image_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:4], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
raise ValueError('`true_image_shapes` should be a 2D tensor of '
'shape [batch_size, 3].')
output_dict = {
input_data_fields.original_image:
images,
input_data_fields.key:
keys,
input_data_fields.original_image_spatial_shape: (
original_image_spatial_shapes),
input_data_fields.true_image_shape:
true_image_shapes
}
detection_fields = fields.DetectionResultFields
detection_boxes = detections[detection_fields.detection_boxes]
detection_scores = detections[detection_fields.detection_scores]
num_detections = tf.to_int32(detections[detection_fields.num_detections])
if class_agnostic:
detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
else:
detection_classes = (
tf.to_int64(detections[detection_fields.detection_classes]) +
label_id_offset)
if scale_to_absolute:
output_dict[detection_fields.detection_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[detection_boxes, original_image_spatial_shapes],
dtype=tf.float32))
else:
output_dict[detection_fields.detection_boxes] = detection_boxes
output_dict[detection_fields.detection_classes] = detection_classes
output_dict[detection_fields.detection_scores] = detection_scores
output_dict[detection_fields.num_detections] = num_detections
if detection_fields.detection_masks in detections:
detection_masks = detections[detection_fields.detection_masks]
# TODO(rathodv): This should be done in model's postprocess
# function ideally.
output_dict[detection_fields.detection_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_detection_masks,
elems=[detection_boxes, detection_masks,
original_image_spatial_shapes],
dtype=tf.uint8))
if detection_fields.detection_keypoints in detections:
detection_keypoints = detections[detection_fields.detection_keypoints]
output_dict[detection_fields.detection_keypoints] = detection_keypoints
if scale_to_absolute:
output_dict[detection_fields.detection_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[detection_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
if groundtruth:
if max_gt_boxes is None:
if input_data_fields.num_groundtruth_boxes in groundtruth:
max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
else:
raise ValueError(
'max_gt_boxes must be provided when processing batched examples.')
if input_data_fields.groundtruth_instance_masks in groundtruth:
masks = groundtruth[input_data_fields.groundtruth_instance_masks]
groundtruth[input_data_fields.groundtruth_instance_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_groundtruth_masks,
elems=[masks, original_image_spatial_shapes],
dtype=tf.uint8))
output_dict.update(groundtruth)
if scale_to_absolute:
groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
output_dict[input_data_fields.groundtruth_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[groundtruth_boxes, original_image_spatial_shapes],
dtype=tf.float32))
# For class-agnostic models, groundtruth classes all become 1.
if class_agnostic:
groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
return output_dict
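# Illustrative sketch, not part of the original module: a minimal call to
# `result_dict_for_batched_example` showing the expected tensor shapes. All
# tensors below are dummies; a real pipeline would take `detections` from
# DetectionModel.postprocess(). The name `_example_batched_result_dict` is
# hypothetical.
def _example_batched_result_dict(batch_size=2, max_detections=100):
  images = tf.zeros([batch_size, 64, 64, 3], dtype=tf.uint8)
  keys = tf.constant(['image_%d' % i for i in range(batch_size)])
  detections = {
      fields.DetectionResultFields.detection_boxes:
          tf.zeros([batch_size, max_detections, 4], dtype=tf.float32),
      fields.DetectionResultFields.detection_scores:
          tf.zeros([batch_size, max_detections], dtype=tf.float32),
      fields.DetectionResultFields.detection_classes:
          tf.zeros([batch_size, max_detections], dtype=tf.float32),
      fields.DetectionResultFields.num_detections:
          tf.fill([batch_size], float(max_detections)),
  }
  return result_dict_for_batched_example(images, keys, detections)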
def get_evaluators(eval_config, categories, evaluator_options=None):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
evaluator_options = evaluator_options or {}
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
in evaluator_options else {})
evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
categories,
**kwargs_dict))
return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
categories,
eval_dict):
"""Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
eval_dict: An evaluation dictionary, returned from
result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in tf.estimator.EstimatorSpec.
"""
eval_metric_ops = {}
evaluator_options = evaluator_options_from_eval_config(eval_config)
evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
for evaluator in evaluators_list:
eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
eval_dict))
return eval_metric_ops
def evaluator_options_from_eval_config(eval_config):
"""Produces a dictionary of evaluation options for each eval metric.
Args:
eval_config: An `eval_pb2.EvalConfig`.
Returns:
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
      evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
"""
eval_metric_fn_keys = eval_config.metrics_set
evaluator_options = {}
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'):
evaluator_options[eval_metric_fn_key] = {
'include_metrics_per_category': (
eval_config.include_metrics_per_category)
}
return evaluator_options
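# Illustrative sketch, not part of the original module: building evaluators
# with per-metric options, mirroring the docstring examples above. The option
# shown only takes effect when 'coco_detection_metrics' appears in
# eval_config.metrics_set; the name `_example_build_evaluators` is
# hypothetical.
def _example_build_evaluators(eval_config, categories):
  evaluator_options = {
      'coco_detection_metrics': {'include_metrics_per_category': True}
  }
  return get_evaluators(eval_config, categories, evaluator_options)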
|
CUDA-Optimized/FastSpeech/fastspeech/trt | trt | __init__ | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tensorrt as trt
TRT_BASE_PATH = os.path.dirname(__file__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO) |
PyTorch/Classification/ConvNets/scripts | scripts | rn50_partial | FLAGS=$1
STAGE_ID=$2
STAGE_LEN=$3
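# Illustrative usage sketch (not part of the original script); the flag string
# and stage numbers below are placeholders:
#   ./scripts/rn50_partial.sh "--amp" 2 30
# would resume from /results/checkpoint_1.pth.tar, run 30 more epochs of the
# 90-epoch schedule and write /results/checkpoint_2.pth.tar.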
python ./multiproc.py \
--nproc_per_node 8 \
./main.py /imagenet \
-j5 -p 100 \
--data-backend pytorch \
--raport-file report_$STAGE_ID.json \
--lr 2.048 \
--batch-size 256 \
--optimizer-batch-size 2048 \
--static-loss-scale 128 \
--warmup 8 \
--arch resnet50 -c fanin \
--label-smoothing 0.1 \
--lr-schedule cosine \
--mom 0.875 \
--wd 3.0517578125e-05 \
--workspace /results \
--epochs 90 \
--run-epochs $STAGE_LEN \
$FLAGS \
--resume /results/checkpoint_$( expr $STAGE_ID - 1).pth.tar \
--checkpoint checkpoint_$STAGE_ID.pth.tar
|
TensorFlow/Detection/SSD/models/research/object_detection/models/keras_applications | keras_applications | mobilenet_v2_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
import itertools
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_applications import mobilenet_v2
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
_layers_to_check = [
'Conv1_relu',
'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN',
'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN',
'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN',
'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN',
'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN',
'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN',
'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN',
'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN',
'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN',
'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN',
'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN',
'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN',
'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN',
'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN',
'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN',
'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN',
'out_relu']
class MobilenetV2Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
train: true,
scale: false,
center: true,
decay: 0.2,
epsilon: 0.1,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(
self, layer_names, batchnorm_training,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None):
"""Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
if not layer_names:
layer_names = _layers_to_check
full_model = mobilenet_v2.mobilenet_v2(
batchnorm_training=batchnorm_training,
conv_hyperparams=conv_hyperparams,
weights=None,
use_explicit_padding=use_explicit_padding,
alpha=alpha,
min_depth=min_depth,
include_top=False)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
def _check_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None):
def graph_fn(image_tensor):
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier)
return model(image_tensor)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
    for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _check_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False,
layer_names=None):
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
alpha=depth_multiplier)
return model(image_tensor)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
])
    for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None):
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=False,
alpha=depth_multiplier)
model(preprocessed_inputs)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=True)
def test_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 150, 150, 32),
(2, 150, 150, 96),
(2, 75, 75, 96),
(2, 75, 75, 24),
(2, 75, 75, 144),
(2, 75, 75, 144),
(2, 75, 75, 24),
(2, 75, 75, 144),
(2, 38, 38, 144),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 19, 19, 192),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 19, 19, 576),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 19, 19, 576),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 10, 10, 576),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 320),
(2, 10, 10, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
expected_feature_map_shape = [(2, 150, 150, 32),
(2, 150, 150, 192),
(2, 75, 75, 192),
(2, 75, 75, 32),
(2, 75, 75, 192),
(2, 75, 75, 192),
(2, 75, 75, 32),
(2, 75, 75, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 32)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, min_depth=32)
def test_hyperparam_override(self):
hyperparams = self._build_conv_hyperparams()
model = mobilenet_v2.mobilenet_v2(
batchnorm_training=True,
conv_hyperparams=hyperparams,
weights=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=32,
include_top=False)
hyperparams.params()
bn_layer = model.get_layer(name='block_5_project_BN')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
depth_multiplier = 1
variables = self._get_variables(depth_multiplier)
self.assertEqual(len(variables), 260)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Classification/ConvNets/dataprep | dataprep | preprocess_imagenet | #!/bin/bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Script to download and preprocess ImageNet Challenge 2012
# training and validation data set.
#
# The final output of this script are sharded TFRecord files containing
# serialized Example protocol buffers. See build_imagenet_data.py for
# details of how the Example protocol buffers contain the ImageNet data.
#
# The final output of this script appears as such:
#
# data_dir/train-00000-of-01024
# data_dir/train-00001-of-01024
# ...
# data_dir/train-01023-of-01024
#
# and
#
# data_dir/validation-00000-of-00128
# data_dir/validation-00001-of-00128
# ...
# data_dir/validation-00127-of-00128
#
# Note that this script may take several hours to run to completion. The
# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending
# on the speed of your machine. Please be patient.
#
# **IMPORTANT**
# To download the raw images, the user must create an account with image-net.org
# and generate a username and access_key. The latter two are required for
# downloading the raw images.
#
# usage:
# ./preprocess_imagenet.sh [data-dir]
set -e
if [ -z "$1" ]; then
echo "Usage: preprocess_imagenet.sh [data dir]"
exit
fi
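# Illustrative example (not part of the original script): assuming the raw
# ImageNet images were already extracted to [data dir]/train and [data dir]/val
# with XML annotations under [data dir]/bounding_boxes, the call would be:
#   ./preprocess_imagenet.sh /data/imagenet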
DATA_DIR="${1%/}"
SCRATCH_DIR="${DATA_DIR}/raw-data/"
mkdir -p ${SCRATCH_DIR}
# Convert the XML files for bounding box annotations into a single CSV.
echo "Extracting bounding box information from XML."
BOUNDING_BOX_SCRIPT="./dataprep/process_bounding_boxes.py"
BOUNDING_BOX_FILE="${DATA_DIR}/imagenet_2012_bounding_boxes.csv"
BOUNDING_BOX_DIR="${DATA_DIR}/bounding_boxes/"
LABELS_FILE="./dataprep/imagenet_lsvrc_2015_synsets.txt"
"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \
| sort > "${BOUNDING_BOX_FILE}"
echo "preprocessing the ImageNet data."
# Build the TFRecords version of the ImageNet data.
OUTPUT_DIRECTORY="${DATA_DIR}"
IMAGENET_METADATA_FILE="./dataprep/imagenet_metadata.txt"
python ./dataprep/build_imagenet_data.py \
--train_directory="${DATA_DIR}/train" \
--validation_directory="${DATA_DIR}/val" \
--output_directory="${DATA_DIR}/result" \
--imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \
--labels_file="${LABELS_FILE}" \
--bounding_box_file="${BOUNDING_BOX_FILE}"
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | faster_rcnn_resnet_v1_feature_extractor | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet V1 Faster R-CNN implementation.
See "Deep Residual Learning for Image Recognition" by He et al., 2015.
https://arxiv.org/abs/1512.03385
Note: this implementation assumes that the classification checkpoint used
to finetune this model is trained using the same configuration as that of
the MSRA provided checkpoints
(see https://github.com/KaimingHe/deep-residual-networks), e.g., with
same preprocessing, batch norm scaling, etc.
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import resnet_utils
from nets import resnet_v1
slim = tf.contrib.slim
class FasterRCNNResnetV1FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Resnet V1 feature extractor implementation."""
def __init__(self,
architecture,
resnet_model,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
architecture: Architecture name of the Resnet V1 model.
resnet_model: Definition of the Resnet V1 model.
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._architecture = architecture
self._resnet_model = resnet_model
super(FasterRCNNResnetV1FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
shape_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
# Disables batchnorm for fine-tuning with smaller batch sizes.
# TODO(chensun): Figure out if it is needed when image
# batch size is bigger.
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
weight_decay=self._weight_decay)):
with tf.variable_scope(
self._architecture, reuse=self._reuse_weights) as var_scope:
_, activations = self._resnet_model(
preprocessed_inputs,
num_classes=None,
is_training=self._train_batch_norm,
global_pool=False,
output_stride=self._first_stage_features_stride,
spatial_squeeze=False,
scope=var_scope)
handle = scope + '/%s/block3' % self._architecture
return activations[handle], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
weight_decay=self._weight_decay)):
with slim.arg_scope([slim.batch_norm],
is_training=self._train_batch_norm):
blocks = [
resnet_utils.Block('block4', resnet_v1.bottleneck, [{
'depth': 2048,
'depth_bottleneck': 512,
'stride': 1
}] * 3)
]
proposal_classifier_features = resnet_utils.stack_blocks_dense(
proposal_feature_maps, blocks)
return proposal_classifier_features
class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet50FeatureExtractor, self).__init__(
'resnet_v1_50', resnet_v1.resnet_v1_50, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet101FeatureExtractor, self).__init__(
'resnet_v1_101', resnet_v1.resnet_v1_101, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet152FeatureExtractor, self).__init__(
'resnet_v1_152', resnet_v1.resnet_v1_152, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
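# Illustrative sketch, not part of the original module: constructing one of the
# extractors above directly. In the full pipeline these classes are normally
# instantiated by the model builder from a pipeline config rather than by hand;
# the name `_example_build_resnet101_extractor` is hypothetical.
def _example_build_resnet101_extractor():
  return FasterRCNNResnet101FeatureExtractor(
      is_training=False,
      first_stage_features_stride=16,
      batch_norm_trainable=False,
      weight_decay=0.0)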
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGX1_waveglow_FP32_1NGPU_train | mkdir -p output
python train.py -m WaveGlow -o output/ -lr 1e-4 --epochs 1001 -bs 4 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
|
TensorFlow/Segmentation/UNet_Industrial/utils | utils | logging | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import dllogger as Logger
def format_step(step):
if isinstance(step, str):
return step
if isinstance(step, int):
return "Iteration: {} ".format(step)
s = ""
if len(step) > 0:
s += "Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
return s
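# Illustrative examples, not part of the original module: format_step accepts a
# string, an int, or an (epoch, iteration[, validation iteration]) tuple; the
# helper name below is hypothetical.
def _example_format_step_usage():
    assert format_step("EPOCH END") == "EPOCH END"
    assert format_step(100) == "Iteration: 100 "
    assert format_step((3, 250)) == "Epoch: 3 Iteration: 250 "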
def init_dllogger(log_dir):
Logger.init([
Logger.StdOutBackend(Logger.Verbosity.DEFAULT, step_format=format_step),
Logger.JSONStreamBackend(Logger.Verbosity.VERBOSE, log_dir)
])
|
PyTorch/Recommendation/DLRM/dlrm/scripts | scripts | transcode | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import os
from collections import defaultdict
import torch
import pandas as pd
from dlrm.data.feature_spec import FeatureSpec
from dlrm.data.defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, CARDINALITY_SELECTOR
from dlrm.data.defaults import get_categorical_feature_type
def parse_args():
parser = ArgumentParser()
parser.add_argument('--input', type=str, default='',
help='Path to input data directory')
parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
help='Name of the input feature specification file')
parser.add_argument('--output', type=str, default='/data',
help='Path to output data directory')
parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
help='Name of the output feature specification file')
parser.add_argument('--chunk_size', type=int, default=65536)
return parser.parse_args()
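# Illustrative invocation, not part of the original script; the directory names
# are placeholders and the module path is assumed from this file's location.
# The input directory must contain the CSV chunks plus the input feature
# specification file referenced by --feature_spec_in:
#   python -m dlrm.scripts.transcode --input /data/csv_dataset \
#       --output /data/binary_dataset --chunk_size 65536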
def main():
args = parse_args()
args_output = args.output
args_input = args.input
args_feature_spec_in = args.feature_spec_in
args_feature_spec_out = args.feature_spec_out
batch_size = args.chunk_size
fspec_in_path = os.path.join(args_input, args_feature_spec_in)
fspec_in = FeatureSpec.from_yaml(fspec_in_path)
input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
input_numerical_features_list = fspec_in.channel_spec[NUMERICAL_CHANNEL]
input_categorical_features_list = fspec_in.channel_spec[CATEGORICAL_CHANNEL]
# Do a pass to establish the cardinalities: they influence the type we save the dataset as
found_cardinalities = defaultdict(lambda: 0)
for mapping_name, mapping in fspec_in.source_spec.items():
df_iterators = []
for chunk in mapping:
assert chunk['type'] == 'csv', "Only csv files supported in this transcoder"
assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder"
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1)
for feature in input_categorical_features_list:
mapping_cardinality = mapping_df[feature].max() + 1
previous_cardinality = found_cardinalities[feature]
found_cardinalities[feature] = max(previous_cardinality, mapping_cardinality)
for feature in input_categorical_features_list:
declared_cardinality = fspec_in.feature_spec[feature][CARDINALITY_SELECTOR]
if declared_cardinality == 'auto':
pass
else:
assert int(declared_cardinality) >= found_cardinalities[feature]
found_cardinalities[feature] = int(declared_cardinality)
categorical_cardinalities = [found_cardinalities[f] for f in input_categorical_features_list]
number_of_numerical_features = fspec_in.get_number_of_numerical_features()
fspec_out = FeatureSpec.get_default_feature_spec(number_of_numerical_features=number_of_numerical_features,
categorical_feature_cardinalities=categorical_cardinalities)
fspec_out.base_directory = args.output
for mapping_name, mapping in fspec_in.source_spec.items():
# open files for outputting
label_path, numerical_path, categorical_paths = fspec_out.get_mapping_paths(mapping_name)
for path in [label_path, numerical_path, *categorical_paths.values()]:
os.makedirs(os.path.dirname(path), exist_ok=True)
output_categorical_features_list = fspec_out.get_categorical_feature_names()
numerical_f = open(numerical_path, "ab+")
label_f = open(label_path, "ab+")
categorical_fs = [open(categorical_paths[name], "ab+") for name in output_categorical_features_list]
categorical_feature_types = [get_categorical_feature_type(card) for card in categorical_cardinalities]
df_iterators = []
for chunk in mapping:
# We checked earlier it's a single file chunk
path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size, names=chunk['features'])
df_iterators.append(chunk_iterator)
zipped = zip(*df_iterators)
for chunks in zipped:
mapping_df = pd.concat(chunks, axis=1) # This takes care of making sure feature names are unique
# Choose the right columns
numerical_df = mapping_df[input_numerical_features_list]
categorical_df = mapping_df[input_categorical_features_list]
label_df = mapping_df[[input_label_feature_name]]
numerical = torch.tensor(numerical_df.values)
label = torch.tensor(label_df.values)
categorical = torch.tensor(categorical_df.values)
# Append them to the binary files
numerical_f.write(numerical.to(torch.float16).cpu().numpy().tobytes())
label_f.write(label.to(torch.bool).cpu().numpy().tobytes())
for cat_idx, cat_feature_type in enumerate(categorical_feature_types):
categorical_fs[cat_idx].write(
categorical[:, cat_idx].cpu().numpy().astype(cat_feature_type).tobytes())
feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
fspec_out.to_yaml(output_path=feature_spec_save_path)
if __name__ == '__main__':
main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model_dataset | model_dataset | xgboost_traffic | dataset:
config:
lag_features:
- name: values
min_value: 1
max_value: 24
model:
config:
max_depth: 10
learning_rate: 0.02
subsample: 0.8
colsample_bytree: 0.8 |
TensorFlow2/Segmentation/nnUNet/data_preprocessing | data_preprocessing | transforms | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def generate_foreground_bounding_box(img):
"""
Generate the spatial bounding box of foreground in the image with start-end positions.
Foreground is defined by positive intensity across channels.
The output format of the coordinates is:
[1st_spatial_dim_start, 2nd_spatial_dim_start, ..., Nth_spatial_dim_start],
[1st_spatial_dim_end, 2nd_spatial_dim_end, ..., Nth_spatial_dim_end]
    The bounding box edges are aligned with the input image edges.
This function returns [0, 0, ...], [0, 0, ...] if there's no positive intensity.
Args:
img: source image to generate bounding box from.
"""
data = np.any(img > 0, axis=0)
ndim = len(data.shape)
if not data.any():
return [0] * ndim, [0] * ndim
else:
indices = np.where(data)
box_start = [ax.min() for ax in indices]
box_end = [ax.max() + 1 for ax in indices]
return box_start, box_end
def spatial_crop(img, box_start, box_end):
slices = [slice(s, e) for s, e in zip(box_start, box_end)]
sd = min(len(slices), len(img.shape[1:]))
slices = [slice(None)] + slices[:sd]
return img[tuple(slices)]
def crop_foreground(image, label=None):
box_start, box_end = generate_foreground_bounding_box(image)
box_start = np.asarray(box_start, dtype=np.int16)
box_end = np.asarray(box_end, dtype=np.int16)
image_cropped = spatial_crop(image, box_start, box_end)
label_cropped = spatial_crop(label, box_start, box_end) if label is not None else None
return image_cropped, label_cropped, (box_start, box_end)
def _normalize(img, nonzero, eps=1e-7):
slices = (img != 0) if nonzero else np.ones(img.shape, dtype=bool)
if not np.any(slices):
return img
sub = np.mean(img[slices])
div = np.std(img[slices])
if div == 0.0:
div = eps
img[slices] = (img[slices] - sub) / div
return img
def normalize_intensity(img, nonzero=True, channel_wise=True):
if channel_wise:
for i, d in enumerate(img):
img[i] = _normalize(d, nonzero=nonzero)
else:
img = _normalize(img, nonzero=nonzero)
return img.astype(np.float32)
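# Minimal usage sketch of the helpers above; the shapes and value ranges are
# illustrative assumptions, not values prescribed by the nnUNet pipeline.
if __name__ == "__main__":
    volume = np.zeros((2, 64, 64, 64), dtype=np.float32)
    volume[:, 10:30, 20:40, 15:35] = np.random.rand(2, 20, 20, 20)
    label = np.zeros((1, 64, 64, 64), dtype=np.uint8)
    image_cropped, label_cropped, (box_start, box_end) = crop_foreground(volume, label)
    normalized = normalize_intensity(image_cropped, nonzero=True, channel_wise=True)
    print(image_cropped.shape, box_start, box_end, normalized.dtype)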
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs | feature_specs | default | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100000
dtype: int32
cat_10.bin:
cardinality: 100000
dtype: int32
cat_11.bin:
cardinality: 100000
dtype: int32
cat_12.bin:
cardinality: 100000
dtype: int32
cat_13.bin:
cardinality: 100000
dtype: int32
cat_14.bin:
cardinality: 100000
dtype: int32
cat_15.bin:
cardinality: 100000
dtype: int32
cat_16.bin:
cardinality: 100000
dtype: int32
cat_17.bin:
cardinality: 100000
dtype: int32
cat_18.bin:
cardinality: 100000
dtype: int32
cat_19.bin:
cardinality: 100000
dtype: int32
cat_2.bin:
cardinality: 100000
dtype: int32
cat_20.bin:
cardinality: 100000
dtype: int32
cat_21.bin:
cardinality: 100000
dtype: int32
cat_22.bin:
cardinality: 100000
dtype: int32
cat_23.bin:
cardinality: 100000
dtype: int32
cat_24.bin:
cardinality: 100000
dtype: int32
cat_25.bin:
cardinality: 100000
dtype: int32
cat_3.bin:
cardinality: 100000
dtype: int32
cat_4.bin:
cardinality: 100000
dtype: int32
cat_5.bin:
cardinality: 100000
dtype: int32
cat_6.bin:
cardinality: 100000
dtype: int32
cat_7.bin:
cardinality: 100000
dtype: int32
cat_8.bin:
cardinality: 100000
dtype: int32
cat_9.bin:
cardinality: 100000
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
PyTorch/DrugDiscovery/MoFlow/moflow/runtime | runtime | distributed_utils | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
import torch.distributed as dist
def get_device(local_rank: int) -> torch.device:
if torch.cuda.is_available():
torch.cuda.set_device(local_rank % torch.cuda.device_count())
device = torch.device("cuda")
else:
device = torch.device("cpu")
logging.warning("not using a(ny) GPU(s)!")
return device
def get_world_size() -> int:
return int(os.environ.get("WORLD_SIZE", 1))
def reduce_tensor(tensor: torch.Tensor, num_gpus: int) -> torch.Tensor:
if num_gpus > 1:
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt / num_gpus
else:
rt = rt // num_gpus
return rt
return tensor
def init_distributed() -> bool:
world_size = int(os.environ.get("WORLD_SIZE", 1))
distributed = world_size > 1
if distributed:
backend = "nccl" if torch.cuda.is_available() else "gloo"
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0" # Needed for CUDA graphs
dist.init_process_group(backend=backend, init_method="env://")
assert dist.is_initialized()
if get_rank() == 0:
logging.info(f"Distributed initialized. World size: {world_size}")
return distributed
def get_rank() -> int:
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
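# Minimal single-process sketch of how these helpers compose; it assumes the script is
# launched without torchrun, so WORLD_SIZE is unset and reduce_tensor is a no-op.
if __name__ == "__main__":
    distributed = init_distributed()
    device = get_device(local_rank=0)
    loss = torch.tensor(1.5, device=device)
    avg_loss = reduce_tensor(loss, get_world_size())
    print(f"distributed={distributed} rank={get_rank()} avg_loss={avg_loss.item()}")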
|
MxNet/Classification/RN50v1.5 | RN50v1.5 | log_utils | import logging
import os
import sys
import dllogger
import horovod.mxnet as hvd
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += "Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
if len(step) == 0:
s = "Summary:"
return s
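# Examples of the formatting above: format_step((1, 200)) returns "Epoch: 1 Iteration: 200 ",
# format_step(()) returns "Summary:", and a string step is passed through unchanged.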
def setup_logging(args):
logging.basicConfig(level=logging.DEBUG, format='{asctime}:{levelname}: {message}', style='{')
if hvd.rank() == 0:
logging_dir = args.logdir if args.logdir is not None else args.workspace
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT, step_format=format_step),
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE, os.path.join(logging_dir, args.dllogger_log)),
])
else:
dllogger.init([])
dllogger.metadata("val.accuracy", {"unit": None})
dllogger.metadata("val.top_k_accuracy_5", {"unit": None})
dllogger.metadata("train.ips", {"unit": "images/s"})
dllogger.metadata("val.ips", {"unit": "images/s"})
dllogger.metadata("val.latency_50", {"unit": "s"})
dllogger.metadata("val.latency_90", {"unit": "s"})
dllogger.metadata("val.latency_avg", {"unit": "s"})
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/structures | structures | segmentation_mask | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
import pycocotools.mask as mask_utils
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
"""
    This class is unfinished and not meant for use yet.
It is supposed to contain the mask for an object as
a 2d tensor
"""
def __init__(self, masks, size, mode):
self.masks = masks
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 2
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
        # reversed indices along the axis being flipped (axis `idx`, of size `dim`)
        flip_idx = torch.arange(dim - 1, -1, -1, dtype=torch.long)
        flipped_masks = self.masks.index_select(idx, flip_idx)
return Mask(flipped_masks, self.size, self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
return Mask(cropped_masks, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
pass
class Polygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, polygons, size, mode, pin_memory=False):
# assert isinstance(polygons, list), '{}'.format(polygons)
if isinstance(polygons, list):
polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
if pin_memory:
polygons = [p.pin_memory() for p in polygons]
elif isinstance(polygons, Polygons):
polygons = polygons.polygons
self.polygons = polygons
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return Polygons(flipped_polygons, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
        # TODO check if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return Polygons(scaled_polys, size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return Polygons(scaled_polygons, size=size, mode=self.mode)
def convert(self, mode):
width, height = self.size
if mode == "mask":
rles = mask_utils.frPyObjects(
[p.numpy() for p in self.polygons], height, width
)
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
# TODO add squeeze?
return mask
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_polygons={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class SegmentationMask(object):
"""
This class stores the segmentations for all objects in the image
"""
def __init__(self, polygons, size, mode=None, pin_memory=False):
"""
Arguments:
            polygons: a list of lists of lists of numbers. The first
                level of the list corresponds to individual instances,
the second level to all the polygons that compose the
object, and the third level to the polygon coordinates.
"""
assert isinstance(polygons, list)
self.polygons = [Polygons(p, size, mode, pin_memory=pin_memory) for p in polygons]
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for polygon in self.polygons:
flipped.append(polygon.transpose(method))
return SegmentationMask(flipped, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped = []
for polygon in self.polygons:
cropped.append(polygon.crop(box))
return SegmentationMask(cropped, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for polygon in self.polygons:
scaled.append(polygon.resize(size, *args, **kwargs))
return SegmentationMask(scaled, size=size, mode=self.mode)
def to(self, *args, **kwargs):
return self
def __getitem__(self, item):
if isinstance(item, (int, slice)):
selected_polygons = [self.polygons[item]]
else:
# advanced indexing on a single dimension
selected_polygons = []
if isinstance(item, torch.Tensor) and \
(item.dtype == torch.uint8 or item.dtype == torch.bool):
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
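# Minimal usage sketch; the polygon coordinates, image size, and mode string below are
# made up. One instance is described by a single triangle, flipped horizontally, resized,
# and finally rasterized to a binary mask.
if __name__ == "__main__":
    masks = SegmentationMask([[[10.0, 10.0, 40.0, 10.0, 25.0, 35.0]]], size=(64, 64), mode="poly")
    flipped = masks.transpose(FLIP_LEFT_RIGHT)
    resized = flipped.resize((128, 128))
    mask = resized.polygons[0].convert("mask")
    print(resized, mask.shape)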
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2LSTMCellPlugin | taco2LSTMCellPlugin | taco2LSTMCellKernel | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_LSTMCELLKERNEL_H
#define TT2I_LSTMCELLKERNEL_H
#include "cudaMemory.h"
#include "cuda_runtime.h"
namespace nvinfer1
{
namespace plugin
{
class Taco2LSTMCellKernel
{
public:
/**
* @brief Create a new Taco2LSTMCellKernel.
*
* @param inputWeightsHost The weight matrix for the input (Wi).
* @param hiddenWeightsHost The weight matrix for the hidden states (Wh).
* @param inputBiasHost The input bias (Bi).
* @param hiddenBiasHost The hidden bias (Bh).
* @param inputLength The length of the input.
* @param numDimension The number of hidden dimensions.
* @param useFP16 Whether or not to use fp16 format weights.
*/
Taco2LSTMCellKernel(const float* inputWeightsHost, const float* hiddenWeightsHost, const float* inputBiasHost,
const float* hiddenBiasHost, const int inputLength, const int numDimension, bool useFP16);
/**
* @brief Execute an LSTM cell.
*
* @param inputA The first half of the input vector.
* @param inputB The second half of the input vector.
* @param hiddenIn The hidden states (input).
* @param cellIn The cell states (input).
* @param hiddenOut The hidden states (output).
* @param cellOut The cell states (output).
* @param inputLengthA The length of the first input.
* @param inputLengthB The length of the second input.
* @param numDimensions The number of dimensions.
* @param stream The stream to execute on.
*/
void execute(const float* inputA, const float* inputB, const float* hiddenIn, const float* cellIn, float* hiddenOut,
float* cellOut, int inputLengthA, int inputLengthB, cudaStream_t stream);
private:
int mInputLength;
int mNumDimension;
bool mFp16;
tts::CudaMemory<float> mWeightsDevice;
tts::CudaMemory<float> mBiasDevice;
};
} // namespace plugin
} // namespace nvinfer1
#endif
|
PyTorch/LanguageModeling/BART/scripts | scripts | run_pretraining_phase2 | #!/usr/bin/env bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_phase1=${1:-200}
train_batch_size_phase2=${2:-32}
learning_rate_phase1=${3:-"5e-3"}
learning_rate_phase2=${4:-"4e-3"}
precision=${5:-"bf16"}
use_preln=${6:-"true"}
num_gpus=${7:-8}
warmup_steps_phase1=${8:-"2166"}
warmup_steps_phase2=${9:-"200"}
train_steps_phase1=${10:-95040}
train_steps_phase2=${11:-7560}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-40}
num_accumulation_steps_phase2=${14:-120}
config_path=${15:-"configs/config.json"}
DATA_DIR=${DATA_DIR:-data}
RESULTS_DIR=${RESULTS_DIR:-results}
RESULTS_DIR_PHASE2=${RESULTS_DIR}/phase_2
mkdir -m 777 -p $RESULTS_DIR_PHASE2
DATESTAMP=`date +'%y%m%d%H%M%S'`
LOGFILE=$RESULTS_DIR_PHASE2/$DATESTAMP.log
printf "Logs written to %s\n" "$LOGFILE"
SOURCE_LEN=512
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
USE_FP16="--fp16"
elif [ "$precision" = "bf16" ] ; then
echo "bf16 activated!"
USE_FP16="--bf16"
else
echo "fp32/tf32 activated!"
USE_FP16=""
fi
if [ "$use_preln" = "true" ] ; then
echo "Trained with PreLN"
USE_FP16="--pre_ln $USE_FP16"
else
echo "Trained with PostLN"
fi
PHASE1_CKPT=${PHASE1_CKPT:-"${RESULTS_DIR}/phase_1/_step${train_steps_phase1}.ckpt"}
export TOKENIZERS_PARALLELISM=true;
python -m torch.distributed.launch --nproc_per_node=${num_gpus} pretrain.py \
--data_dir=${DATA_DIR}/pretrain_lddl_${SOURCE_LEN} \
--config_path=${config_path} \
--output_dir=${RESULTS_DIR_PHASE2} \
--num_workers 4 \
--learning_rate=${learning_rate_phase2} \
${USE_FP16} \
--do_train \
--train_batch_size=${train_batch_size_phase2} --gradient_accumulation_steps=${num_accumulation_steps_phase2} \
--max_steps=${train_steps_phase2} --warmup_steps=${warmup_steps_phase2} \
--max_source_length=${SOURCE_LEN} \
--lr_scheduler polynomial \
--label_smoothing 0 \
--weight_decay 0.1 \
--dropout 0.1 --attention_dropout 0.1 --gradient_clip_val=0.1 \
--resume_from_checkpoint=${PHASE1_CKPT} --load_model_weights_only \
--save_checkpoint_steps=${save_checkpoints_steps} --log_freq=${save_checkpoints_steps} \
--allreduce_post_accumulation_half_precision \
--seed $RANDOM --lamb |& tee -a ${LOGFILE}
|
PyTorch/Forecasting/TFT/triton/runner | runner | stages | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
class ResultsType:
"""
Results types generated by runner
"""
TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
TRITON_PERFORMANCE_ONLINE = "triton_performance_online"
class Stage:
"""
Stage definition
"""
label: str
commands: List[Command]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
commands: Union[Tuple[str, ...], List[str]],
result_path: Optional[str] = None,
result_type: Optional[str] = None,
):
"""
Args:
commands: List or Tuple of commands provided as raw string
result_path: Path to results file generated by stage
result_type: Type of results generated by stage
"""
if type(commands) not in [tuple, list]:
raise ValueError("""Incorrect type of commands list. Please, provide list of commands as tuple.""")
self.commands = list(map(lambda command: Command(data=command), commands))
self.result_path = result_path
self.result_type = result_type
class ExportStage(Stage):
label = "Export Model"
class ConversionStage(Stage):
label = "Convert Model"
class DeployStage(Stage):
label = "Deploy Model"
class CorrectnessStage(Stage):
label = "Model Correctness Tests"
class TritonPreparePerformanceProfilingDataStage(Stage):
label = "Prepare Triton Profiling Data"
class TritonPerformanceOfflineStage(Stage):
label = "Triton Performance Offline Tests"
class TritonPerformanceOnlineStage(Stage):
label = "Triton Performance Online Tests"
|
PyTorch/Translation/Transformer/scripts | scripts | run_DGXA100_AMP | #! /bin/bash
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
nvidia-smi
RESULTS_DIR='/results'
CHECKPOINTS_DIR='/results/checkpoints'
mkdir -p $CHECKPOINTS_DIR
: ${SEED:=1}
: ${LR:=0.000846}
: ${WARMUP:=4000}
: ${NUM_EPOCHS:=30}
: ${BS:=10240}
: ${NUM_GPU:=8}
STAT_FILE=${RESULTS_DIR}/DGXA100_amp_${NUM_GPU}GPU_log.json
DISTRIBUTED="-m torch.distributed.run --nproc_per_node=${NUM_GPU}"
python ${DISTRIBUTED} /workspace/translation/train.py \
/data/ \
--arch transformer_wmt_en_de_big_t2t \
--share-all-embeddings \
--optimizer adam \
--adam-betas 0.9 0.997 \
--adam-eps 1e-9 \
--clip-norm 0.0 \
--lr-scheduler inverse_sqrt \
--warmup-init-lr 0.0 \
--warmup-updates ${WARMUP} \
--lr $LR \
--min-lr 0.0 \
--dropout 0.1 \
--weight-decay 0.0 \
--criterion label_smoothed_cross_entropy \
--label-smoothing 0.1 \
--max-tokens ${BS} \
--seed ${SEED} \
--max-epoch ${NUM_EPOCHS} \
--no-epoch-checkpoints \
--fuse-layer-norm \
--online-eval \
--log-interval 500 \
--save-dir ${RESULTS_DIR} \
--stat-file ${STAT_FILE} \
--amp
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync | # SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal
# loss (a.k.a Retinanet).
# See Lin et al, https://arxiv.org/abs/1708.02002
# Trained on COCO, initialized from Imagenet classification checkpoint
# Achieves 35.2 mAP on COCO14 minival dataset. Doubling the number of training
# steps to 50k gets 36.9 mAP
# This config is TPU compatible
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: false
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: [1.0, 2.0, 0.5]
scales_per_octave: 2
}
}
image_resizer {
fixed_shape_resizer {
height: 640
width: 640
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 256
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
num_layers_before_predictor: 4
kernel_size: 3
}
}
feature_extractor {
type: 'ssd_resnet50_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_object_covered: 0.0
min_aspect_ratio: 0.75
max_aspect_ratio: 3.0
min_area: 0.75
max_area: 1.0
overlap_thresh: 0.0
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
} |
PyTorch/LanguageModeling/BERT/triton/dist6l/scripts | scripts | setup_environment | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKDIR="${WORKDIR:=$(pwd)}"
export DATASETS_DIR=${WORKDIR}/datasets
export WORKSPACE_DIR=${WORKDIR}/runner_workspace
export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINTS_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}
echo "Setting up environment"
export MODEL_NAME=BERT
export ENSEMBLE_MODEL_NAME=
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1 |
PyTorch/LanguageModeling/BERT/triton/dist4l/runner | runner | __main__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices) |
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_resnet_v1_fpn_feature_extractor_testbase | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import abc
import numpy as np
import tensorflow as tf
from object_detection.models import ssd_feature_extractor_test
class SSDResnetFPNFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Helper test class for SSD Resnet v1 FPN feature extractors."""
@abc.abstractmethod
def _resnet_scope_name(self):
pass
@abc.abstractmethod
def _fpn_scope_name(self):
return 'fpn'
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 254
image_width = 254
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = tf.constant(np.random.rand(4, image_height, image_width, 3))
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
with self.test_session() as sess:
test_image_out, preprocessed_image_out = sess.run(
[test_image, preprocessed_image])
self.assertAllClose(preprocessed_image_out,
test_image_out - [[123.68, 116.779, 103.939]])
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
feature_extractor.extract_features(preprocessed_inputs)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for variable in variables:
self.assertTrue(
variable.name.startswith(self._resnet_scope_name())
or variable.name.startswith(self._fpn_scope_name()))
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts | scripts | run_electricity | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
: ${SEED:=1}
: ${LR:=1e-3}
: ${NGPU:=8}
: ${BATCH_SIZE:=1024}
: ${EPOCHS:=30}
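# The defaults above can be overridden through the environment, e.g. (sketch):
#   NGPU=1 BATCH_SIZE=512 bash scripts/run_electricity.sh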
python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \
--dataset electricity \
--data_path /data/processed/electricity_bin \
--batch_size=${BATCH_SIZE} \
--sample 450000 50000 \
--lr ${LR} \
--epochs ${EPOCHS} \
--seed ${SEED} \
--use_amp \
--results /results/TFT_electricity_bs${NGPU}x${BATCH_SIZE}_lr${LR}/seed_${SEED}
|
PyTorch/Segmentation/nnUNet/triton/scripts | scripts | setup_environment | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKDIR="$(pwd)"
export WORKSPACE_DIR=${WORKDIR}/workspace
export DATASETS_DIR=${WORKSPACE_DIR}/datasets_dir/01_3d/
export CHECKPOINT_DIR=${WORKSPACE_DIR}/checkpoint_dir
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINT_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}
echo "Setting up environment"
export MODEL_NAME=nnunet
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1
export TRITON_SERVER_URL=127.0.0.1
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim | optim | fp16_optimizer | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
import torch
from common.fairseq.optim.dynamic_loss_scaler import DynamicLossScaler
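# clip_grad_norm_ below relies on multi_tensor_l2norm_available and multi_tensor_total_norm;
# the block below is a sketch of those helpers, modelled on the upstream fairseq utilities
# that wrap the fused amp_C kernel from NVIDIA apex.
try:
    from amp_C import multi_tensor_l2norm  # fused multi-tensor L2-norm kernel from apex
    multi_tensor_l2norm_available = True
except ImportError:
    multi_tensor_l2norm_available = False
@torch.no_grad()
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
    """Total L2 norm of `grads`, using the fused apex kernel for CUDA tensors."""
    per_device_grads = defaultdict(list)
    for grad in grads:
        per_device_grads[grad.device].append(grad)
    norms = []
    for device, device_grads in per_device_grads.items():
        if device.type == "cuda":
            has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
            with torch.cuda.device(device):
                norm = multi_tensor_l2norm(chunk_size, has_inf, [device_grads], False)
            norms.append(norm[0].to(torch.cuda.current_device()))
        else:
            norms += [torch.norm(g, p=2, dtype=torch.float32) for g in device_grads]
    return torch.norm(torch.stack(norms))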
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [
p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, "expert")
]
expert_grads = [
p.grad.detach() for p in params if grad_exists(p) and hasattr(p, "expert")
]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
        # Prefer the fused multi-tensor L2-norm kernel when apex's amp_C is available.
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads:
g.mul_(clip_coef)
return total_norm
class FairseqOptimizer(object):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
@classmethod
def add_args(cls, parser):
"""Add optimizer-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
"""Reset optimizer instance."""
if not hasattr(self, "_optimizer"):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError("_optimizer must be an instance of torch.optim.Optimizer")
self._optimizer = optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
@property
def params(self):
"""Return an iterable of the parameters held by the optimizer."""
for param_group in self.param_groups:
for p in param_group["params"]:
yield p
@property
def param_groups(self):
return self.optimizer.param_groups
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
"""Return the current learning rate."""
return self.param_groups[0]["lr"]
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.param_groups:
param_group["lr"] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
if optimizer_overrides is not None and len(optimizer_overrides) > 0:
# override learning rate, momentum, etc. with latest values
for group in self.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
loss.backward()
def all_reduce_grads(self, module):
"""Manually all-reduce gradients (if required)."""
if hasattr(module, "all_reduce_grads"):
module.all_reduce_grads()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
for p in self.params:
if p.grad is not None:
if torch.is_tensor(c):
c = c.to(p.grad.device)
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)
def step(self, closure=None, scale=1.0, groups=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
if self.supports_groups:
self.optimizer.step(closure, scale=scale, groups=groups)
else:
self.optimizer.step(closure, scale=scale)
else:
if scale != 1.0:
self.multiply_grads(1.0 / scale)
if self.supports_groups:
self.optimizer.step(closure, groups=groups)
else:
self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, "supports_memory_efficient_fp16"):
return self.optimizer.supports_memory_efficient_fp16
return False
@property
def supports_step_with_scale(self):
if hasattr(self.optimizer, "supports_step_with_scale"):
return self.optimizer.supports_step_with_scale
return False
@property
def supports_groups(self):
if hasattr(self.optimizer, "supports_groups"):
return self.optimizer.supports_groups
return False
@property
def supports_flat_params(self):
"""
Whether the optimizer supports collapsing of the model
parameters/gradients into a single contiguous Tensor.
"""
if hasattr(self.optimizer, "supports_flat_params"):
return self.optimizer.supports_flat_params
return False
def broadcast_global_state_dict(self, state_dict):
"""
Broadcasts a global state dict to all ranks.
Useful for optimizers that shard state between ranks.
"""
if hasattr(self.optimizer, "broadcast_global_state_dict"):
return self.optimizer.broadcast_global_state_dict(state_dict)
else:
return state_dict
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
        # forward __init__ call to the next class in the MRO (method resolution order)
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params) or (
isinstance(self.fp32_params, dict)
and all(torch.is_tensor(t) for t in self.fp32_params.values())
)
@classmethod
def build_fp32_params(cls, args, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
is_pipeline_parallel = getattr(
args, "pipeline_model_parallel", False
) and getattr(args, "distributed_no_spawn", False)
total_param_size = sum(p.data.numel() for p in params)
devices = [torch.cuda.current_device()]
if is_pipeline_parallel:
devices = list(set(args.pipeline_devices))
fp32_params = {}
for device in devices:
if is_pipeline_parallel:
device_param_size = sum(
p.data.numel() for p in params if p.device.index == device
)
device_params = [p for p in params if p.device.index == device]
else:
device_param_size = total_param_size
device_params = params
fp32_params[device] = (
device_params[0].new(0).float().new(device_param_size)
)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset : offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(
device_param_size
)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
if hasattr(p, 'expert'):
p32.expert = True
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, "param_group"):
p32.param_group = p.param_group
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict["loss_scale"] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if "loss_scale" in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict["loss_scale"]
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
# copy FP16 grads to FP32
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (
p.grad.data
if p.grad is not None
else p.data.new_zeros(p.data.shape)
)
numel = grad_data.numel()
self.fp32_params[device].grad.data[
offset : offset + numel
].copy_(grad_data.view(-1))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
if p32.grad is None:
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(
self.fp32_params[device]
.data[offset : offset + numel]
.view_as(p.data)
)
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (
# Skip the multiplication if it's a no-op (i.e., if _multiply_factor
# is 1.0). At the same time, we want to avoid the device-to-host
# transfer by comparing it to 1.0. Since _multiply_factor starts as
# a Python float, we roughly assume that if it's a tensor then it's
# probably not =1.0 anymore and we do the multiplication. Otherwise
# we can safely check the value without a D2H transfer.
torch.is_tensor(self._multiply_factor)
or self._multiply_factor != 1.0
):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = self._multiply_factor * self.fp32_optimizer.clip_grad_norm(
0, aggregate_norm_fn
)
if self.scaler is not None:
if grad_norm > max_norm > 0.0:
self._multiply_factor *= max_norm / grad_norm
self.scaler.check_overflow(grad_norm)
elif max_norm > 0.0:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
if getattr(self, "supports_step_with_scale", False):
self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
class FP16Optimizer(_FP16OptimizerMixin, FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, cfg, params, fp32_optimizer, fp32_params, **kwargs):
super().__init__(cfg.optimizer)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
scale_window = int(2 ** 14 / cfg.world_size / cfg.update_freq)
if not (cfg.bf16 and cfg.bf16_disable_loss_scaler):
self.scaler = DynamicLossScaler(
init_scale=cfg.fp16_init_scale,
scale_window=scale_window,
tolerance=0.0,
threshold=None,
min_loss_scale=cfg.min_loss_scale,
)
else:
print('Disabled loss scaler.')
# disable loss scaling for bfloat16
self.scaler = None
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@optimizer.setter
def optimizer(self, optimizer):
self.fp32_optimizer.optimizer = optimizer
@property
def lr_scheduler(self):
return getattr(self.fp32_optimizer, "lr_scheduler", None)
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def all_reduce_grads(self, module):
self.fp32_optimizer.all_reduce_grads(module)
@property
def supports_flat_params(self):
return self.fp32_optimizer.supports_flat_params
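# Typical wiring sketch (the fp32 optimizer factory name and cfg fields are assumptions
# based on how this class reads them, not an exact API):
#   fp32_params = FP16Optimizer.build_fp32_params(cfg.optimizer, params, flatten=False)
#   fp32_optimizer = build_fp32_optimizer(cfg.optimizer, fp32_params)  # hypothetical factory
#   optimizer = FP16Optimizer(cfg, params, fp32_optimizer, fp32_params)
#   optimizer.backward(loss); optimizer.clip_grad_norm(max_norm); optimizer.step()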
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/cc/kernels/volta | volta | dot_based_interact_volta | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dot_based_interact_volta.h"
#include "../launchers/dot_based_interact_fp32_launcher.cu"
#include "../launchers/dot_based_interact_fp16_launcher.cu"
void dotBasedInteractVoltaF16Fwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
dotBasedInteractFP16Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream);
}
void dotBasedInteractVoltaF16Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
dotBasedInteractFP16Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream);
}
void dotBasedInteractVoltaF32Fwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
dotBasedInteractFP32Fwd(input, bottom_mlp_output, output, batch_size, num_rows, num_cols, stream);
}
void dotBasedInteractVoltaF32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
cudaStream_t stream) {
dotBasedInteractFP32Bwd(input, upstream_grad, grad, bottom_mlp_grad, batch_size, num_rows, num_cols, stream);
}
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | launch_docker | #!/usr/bin/env bash
DATASET_DIR=$(realpath -s $1)
RESULT_DIR=$(realpath -s $2)
if [[ ! -e ${DATASET_DIR} ]]; then
echo "creating ${DATASET_DIR} ..."
mkdir -p "${DATASET_DIR}"
fi
if [[ ! -e ${RESULT_DIR} ]]; then
echo "creating ${RESULT_DIR} ..."
mkdir -p "${RESULT_DIR}"
fi
# Build the docker container
docker build . --rm -t unet_industrial:latest
# start the container with nvidia-docker
nvidia-docker run -it --rm \
--shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 \
-v ${DATASET_DIR}:/data/dagm2007/ \
-v ${RESULT_DIR}:/results \
unet_industrial:latest |
PyTorch/SpeechSynthesis/FastPitch/hifigan | hifigan | models | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, feature_loss, discriminator_loss, generator_loss,
# init_weights, get_padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from common.stft import STFT
from common.utils import AttrDict, init_weights, get_padding
LRELU_SLOPE = 0.1
class NoAMPConv1d(Conv1d):
def __init__(self, *args, no_amp=False, **kwargs):
super().__init__(*args, **kwargs)
self.no_amp = no_amp
def _cast(self, x, dtype):
if isinstance(x, (list, tuple)):
return [self._cast(t, dtype) for t in x]
else:
return x.to(dtype)
def forward(self, *args):
if not self.no_amp:
return super().forward(*args)
with torch.cuda.amp.autocast(enabled=False):
return self._cast(
super().forward(*self._cast(args, torch.float)), args[0].dtype)
class ResBlock1(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs1 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[1]), dilation[1])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[2]), dilation[2])),
])
self.convs2 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
])
self.convs1.apply(init_weights)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3)):
super().__init__()
        self.conf = conf
        self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs = nn.ModuleList([
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[1]), dilation[1])),
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(nn.Module):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(self, conf):
super().__init__()
conf = AttrDict(conf)
self.conf = conf
self.num_kernels = len(conf.resblock_kernel_sizes)
self.num_upsamples = len(conf.upsample_rates)
self.conv_pre = weight_norm(
Conv1d(80, conf.upsample_initial_channel, 7, 1, padding=3))
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if conf.resblock == '1' else ResBlock2
self.ups = []
for i, (u, k) in enumerate(zip(conf.upsample_rates,
conf.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(conf.upsample_initial_channel // (2 ** i),
conf.upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k-u)//2)))
self.ups = nn.Sequential(*self.ups)
self.resblocks = []
for i in range(len(self.ups)):
resblock_list = []
ch = conf.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(conf.resblock_kernel_sizes,
conf.resblock_dilation_sizes)):
resblock_list.append(resblock(conf, ch, k, d))
resblock_list = nn.Sequential(*resblock_list)
self.resblocks.append(resblock_list)
self.resblocks = nn.Sequential(*self.resblocks)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def load_state_dict(self, state_dict, strict=True):
# Fallback for old checkpoints (pre-ONNX fix)
new_sd = {}
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint layout is the older one
if len(parts) == 5:
layer = int(parts[1])
new_layer = f"{layer//3}.{layer%3}"
new_k = f"resblocks.{new_layer}.{'.'.join(parts[2:])}"
new_sd[new_k] = v
# Fix for conv1d/conv2d/NHWC
curr_sd = self.state_dict()
for key in new_sd:
len_diff = len(new_sd[key].size()) - len(curr_sd[key].size())
if len_diff == -1:
new_sd[key] = new_sd[key].unsqueeze(-1)
elif len_diff == 1:
new_sd[key] = new_sd[key].squeeze(-1)
super().load_state_dict(new_sd, strict=strict)
def forward(self, x):
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = 0
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('HiFi-GAN: Removing weight norm.')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
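# Illustrative config sketch (values are assumptions in the spirit of HiFi-GAN V1,
# not taken from this repository's config files); only the keys read above are shown:
#
#   generator = Generator({
#       'resblock': '1',
#       'upsample_rates': [8, 8, 2, 2],
#       'upsample_kernel_sizes': [16, 16, 4, 4],
#       'upsample_initial_channel': 512,
#       'resblock_kernel_sizes': [3, 7, 11],
#       'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
#   })
#   wav = generator(mel)  # mel: [B, 80, T] -> wav: [B, 1, ~T * prod(upsample_rates)]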
class Denoiser(nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros', **infer_kw):
super().__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
for name, p in hifigan.named_parameters():
if name.endswith('.weight'):
dtype = p.dtype
device = p.device
break
mel_init = {'zeros': torch.zeros, 'normal': torch.randn}[mode]
mel_input = mel_init((1, 80, 88), dtype=dtype, device=device)
with torch.no_grad():
bias_audio = hifigan(mel_input, **infer_kw).float()
if len(bias_audio.size()) > 2:
bias_audio = bias_audio.squeeze(0)
elif len(bias_audio.size()) < 2:
bias_audio = bias_audio.unsqueeze(0)
assert len(bias_audio.size()) == 2
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
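# Hedged usage sketch (the strength value is an example only; the generator is
# assumed to live on a CUDA device, as required by the STFT above):
#   denoiser = Denoiser(generator)
#   audio = generator(mel).squeeze(1)          # [B, T]
#   audio = denoiser(audio, strength=0.005)    # subtract the model's bias spectrum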
class DiscriminatorP(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
ks = kernel_size
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (ks, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(nn.Module):
def __init__(self, periods, concat_fwd=False):
super().__init__()
layers = [DiscriminatorP(p) for p in periods]
self.discriminators = nn.ModuleList(layers)
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
y_ds, fmaps = d(concat_discr_input(y, y_hat))
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(nn.Module):
def __init__(self, use_spectral_norm=False, no_amp_grouped_conv=False):
super().__init__()
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(NoAMPConv1d(128, 256, 41, 2, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(256, 512, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(512, 1024, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(1024, 1024, 41, 1, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
# x = l(x.unsqueeze(-1)).squeeze(-1)
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(nn.Module):
def __init__(self, no_amp_grouped_conv=False, concat_fwd=False):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=1),
AvgPool1d(4, 2, padding=1)
])
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
ys = concat_discr_input(y, y_hat)
if i != 0:
ys = self.meanpools[i-1](ys)
y_ds, fmaps = d(ys)
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def concat_discr_input(y, y_hat):
return torch.cat((y, y_hat), dim=0)
def split_discr_output(y_ds, fmaps):
y_d_r, y_d_g = torch.chunk(y_ds, 2, dim=0)
fmap_r, fmap_g = zip(*(torch.chunk(f, 2, dim=0) for f in fmaps))
return y_d_r, y_d_g, fmap_r, fmap_g
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
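# Note: discriminator_loss / generator_loss implement the least-squares GAN
# objective used by HiFi-GAN (real outputs regressed towards 1 and fake outputs
# towards 0 for the discriminator; fake outputs towards 1 for the generator),
# while feature_loss is an L1 feature-matching term over discriminator
# activations, scaled by 2.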
|
PyTorch/LanguageModeling/BERT/triton/runner/maintainer/docker/containers | containers | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer
|
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact | dot_based_interact | dot_based_interact_fp32_fwd | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "shared_utils.cuh"
using namespace nvcuda;
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractF32FwdKernelNonAligned(const float *__restrict input,
float *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint output_size,
uint interaction_output_size) {
extern __shared__ float smem_f32_fwd[];
float *smem_in = &smem_f32_fwd[0];
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// The layout of each output row is bottom_mlp | interactions | padding
uint output_batch_offset = blockIdx.x * output_size;
float *gmem_out_bottom_mlp = &output[output_batch_offset];
float *gmem_out_interaction = &output[output_batch_offset + num_cols];
// Load the input - one sample per block
for (uint idx = threadIdx.x; idx < input_size; idx += blockDim.x) {
smem_in[idx] = gmem_in[idx];
}
__syncthreads();
// Copy bottom MLP output to output
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
gmem_out_bottom_mlp[idx] = smem_in[idx];
}
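  // Each thread walks part of the strictly lower-triangular (num_rows x num_rows)
  // interaction matrix: the loop below converts the flat index idx into the pair
  // (target_row, target_col) with target_col < target_row.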
for (uint idx = threadIdx.x; idx < (interaction_output_size); idx += blockDim.x) {
uint elems_per_row = 1;
uint index = idx;
while (index >= elems_per_row) {
index -= elems_per_row;
elems_per_row++;
}
uint target_row = elems_per_row;
uint target_col = index;
float sum = 0;
for (uint i = 0; i < num_cols; i++) {
float tmp1 = smem_in[target_row * num_cols + i];
float tmp2 = smem_in[target_col * num_cols + i];
sum = fmaf(tmp1, tmp2, sum);
}
gmem_out_interaction[idx] = sum;
}
// Zero out the padding
uint zeroout_index = num_cols + interaction_output_size + threadIdx.x;
if(zeroout_index < output_size){
gmem_out_bottom_mlp[zeroout_index] = 0;
}
}
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractF32FwdKernel(const float *__restrict input,
float *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint output_size,
uint interaction_output_size) {
extern __shared__ float smem_f32_fwd[];
float *smem_in = &smem_f32_fwd[0];
  // Launch one block per sample in the batch
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// The layout of each output row is bottom_mlp | interactions | padding
uint output_batch_offset = blockIdx.x * output_size;
float *gmem_out_bottom_mlp = &output[output_batch_offset];
float *gmem_out_interaction = &output[output_batch_offset + num_cols];
// Load the input - one sample per block
uint input_size_float4 = input_size >> 2;
for (uint idx = threadIdx.x; idx < input_size_float4; idx += blockDim.x) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_in)[idx];
}
__syncthreads();
// Copy bottom MLP output to output
uint btm_mlp_out_size_float4 = num_cols >> 2;
for (uint idx = threadIdx.x; idx < btm_mlp_out_size_float4; idx += blockDim.x) {
((float4 *)gmem_out_bottom_mlp)[idx] = ((float4 *)smem_in)[idx];
}
for (uint idx = threadIdx.x; idx < (interaction_output_size); idx += blockDim.x) {
uint elems_per_row = 1;
uint index = idx;
while (index >= elems_per_row) {
index -= elems_per_row;
elems_per_row++;
}
uint target_row = elems_per_row;
uint target_col = index;
float4 sum;
sum.x = 0;
sum.y = 0;
sum.z = 0;
sum.w = 0;
uint num_cols_float4 = num_cols >> 2;
for (uint i = 0; i < num_cols_float4; i++) {
float4 tmp1 = ((float4 *)smem_in)[target_row * num_cols_float4 + i];
float4 tmp2 = ((float4 *)smem_in)[target_col * num_cols_float4 + i];
sum.x = fmaf(tmp1.x, tmp2.x, sum.x);
sum.y = fmaf(tmp1.y, tmp2.y, sum.y);
sum.z = fmaf(tmp1.z, tmp2.z, sum.z);
sum.w = fmaf(tmp1.w, tmp2.w, sum.w);
}
gmem_out_interaction[idx] = sum.x + sum.y + sum.z + sum.w;
}
// Zero out the padding
uint zeroout_index = num_cols + interaction_output_size + threadIdx.x;
if(zeroout_index < output_size){
gmem_out_bottom_mlp[zeroout_index] = 0;
}
}
inline void dotBasedInteractF32Fwd(const void *input,
const void *bottom_mlp_output,
const void *output,
uint batch_size,
uint num_rows,
uint num_cols) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
// Output
uint interaction_output_size = (num_rows * (num_rows - 1)) >> 1;
uint output_size = ((interaction_output_size+num_cols-1)/8 + 1)*8; //round up to multiple of 8
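  // Example (illustrative shapes, not fixed by this kernel): for num_rows = 27
  // feature vectors of num_cols = 128, interaction_output_size = 27*26/2 = 351 and
  // output_size = ((351 + 128 - 1)/8 + 1)*8 = 480, i.e. 128 bottom-MLP values +
  // 351 interaction values + 1 zeroed padding value per sample.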
// Input
uint input_size = num_rows * num_cols;
uint shared_mem_size_elems = input_size;
uint shared_mem_size_bytes = shared_mem_size_elems << 2; // F32 Kernel
bool float4_predicate = !((num_cols & 3) || (output_size & 3));
if (float4_predicate) {
dotBasedInteractF32FwdKernel<kNumThreads>
<<<num_blocks, kNumThreads, shared_mem_size_bytes, at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(float *)output,
batch_size,
num_rows,
num_cols,
input_size,
output_size,
interaction_output_size);
} else {
dotBasedInteractF32FwdKernelNonAligned<kNumThreads>
<<<num_blocks, kNumThreads, shared_mem_size_bytes,
at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(float *)output,
batch_size,
num_rows,
num_cols,
input_size,
output_size,
interaction_output_size);
}
} |
TensorFlow/Detection/SSD/models/research/slim/scripts | scripts | finetune_inception_v3_on_flowers | #!/bin/bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script performs the following operations:
# 1. Downloads the Flowers dataset
# 2. Fine-tunes an InceptionV3 model on the Flowers training set.
# 3. Evaluates the model on the Flowers validation set.
#
# Usage:
# cd slim
# ./slim/scripts/finetune_inception_v3_on_flowers.sh
set -e
# Where the pre-trained InceptionV3 checkpoint is saved to.
PRETRAINED_CHECKPOINT_DIR=/tmp/checkpoints
# Where the training (fine-tuned) checkpoint and logs will be saved to.
TRAIN_DIR=/tmp/flowers-models/inception_v3
# Where the dataset is saved to.
DATASET_DIR=/tmp/flowers
# Download the pre-trained checkpoint.
if [ ! -d "$PRETRAINED_CHECKPOINT_DIR" ]; then
mkdir ${PRETRAINED_CHECKPOINT_DIR}
fi
if [ ! -f ${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt ]; then
wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz
tar -xvf inception_v3_2016_08_28.tar.gz
mv inception_v3.ckpt ${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt
rm inception_v3_2016_08_28.tar.gz
fi
# Download the dataset
python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir=${DATASET_DIR}
# Fine-tune only the new layers for 1000 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v3 \
--checkpoint_path=${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt \
--checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
--trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \
--max_number_of_steps=1000 \
--batch_size=32 \
--learning_rate=0.01 \
--learning_rate_decay_type=fixed \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=100 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR} \
--eval_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v3
# Fine-tune all layers for 500 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v3 \
--checkpoint_path=${TRAIN_DIR} \
--max_number_of_steps=500 \
--batch_size=32 \
--learning_rate=0.0001 \
--learning_rate_decay_type=fixed \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=10 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR}/all \
--eval_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=inception_v3
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit/bermuda | bermuda | pyt | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from collections import Counter
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
import torch # pytype: disable=import-error
import yaml
from model_navigator.model import ModelSignatureConfig
from model_navigator.tensor import TensorSpec
from model_navigator.utils.config import YamlConfigFile
from ..core import (
GET_MODEL_FN_NAME,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
load_from_file,
)
from ..extensions import loaders, runners, savers
from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
def get_sample_input(dataloader, device):
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
        raise TypeError("The x element of the batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
def _get_dtype(v):
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
return np.dtype(dtype)
return {k: _get_dtype(v) for k, v in t.items()}
batch = next(dataloader)
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
return input_dtypes, output_dtypes
### TODO assumption: floating point input type has same precision as the model
def _get_model_signature(
inputs_names: typing.List[str],
outputs_names: typing.List[str],
precision,
dataloader_fn,
batch_size_dim: typing.Optional[int] = None,
):
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in outputs_names
}
return ModelSignatureConfig(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, io_names_dict = get_model(**self._model_args)
dataloader_fn = kwargs.get("dataloader_fn", None)
output_type = kwargs.get("output_type", None)
precision = infer_model_precision(model)
batch_axis = getattr(model, "bermuda_batch_axis", 0) # by default models supports batching; batch_axis=0
model_signature = _get_model_signature(
inputs_names=io_names_dict["inputs"],
outputs_names=io_names_dict["outputs"],
precision=precision,
dataloader_fn=dataloader_fn,
batch_size_dim=batch_axis,
)
model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs)
if output_type == Format.TS_TRACE.value:
return self._trace(model, dataloader_fn)
elif output_type == Format.TS_SCRIPT.value:
return self._script(model)
elif output_type == Format.ONNX.value:
return model
else:
raise ValueError(f"Not supported PyTorch format: {output_type}")
def _trace(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
# Run dummy forward to initialize lazy modules
model.handle(*dummy_input)
traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
def _script(self, model: Model) -> Model:
scripted_model = torch.jit.script(model.handle)
return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.name}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXSaver(BaseSaver):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0)
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
model.handle(*dummy_input)
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
model_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
)
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs)
annotation_path = model_path.parent / f"{model_path.name}.yaml"
with YamlConfigFile(annotation_path) as config_file:
config_file.save_config(signature_config)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.PYT.value, PyTorchModelLoader)
loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader)
loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader)
savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver)
savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver)
savers.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXSaver)
runners.register_extension(Format.PYT.value, PyTorchRunner)
runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner)
runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP32_1GPU_BENCHMARK | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT_DIR=${1:-"/results/SSD320_FP32_1GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_bench.config"
GPUS=1
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
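# Run training and capture its log; throughput is derived below by averaging the
# last quarter of the reported global_step/sec values and multiplying by the
# per-GPU batch size (32) and the number of GPUs.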
TRAIN_LOG=$(python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
        --alsologtostderr \
"${@:3}" 2>&1)
PERF=$(echo "$TRAIN_LOG" | sed -n 's|.*global_step/sec: \(\S\+\).*|\1|p' | python -c "import sys; x = sys.stdin.readlines(); x = [float(a) for a in x[int(len(x)*3/4):]]; print(32*$GPUS*sum(x)/len(x), 'img/s')")
mkdir -p $CKPT_DIR
echo "Single GPU single precision training performance: $PERF" | tee $CKPT_DIR/train_log
echo "$TRAIN_LOG" >> $CKPT_DIR/train_log
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection | object_detection | target_assigner | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import box_list, shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner:
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder,
negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match, groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4)
)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])
)
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
matched_anchors_mask = tf.expand_dims(matched_anchors_mask, axis=1)
matched_anchors_mask = tf.broadcast_to(matched_anchors_mask, shape=matched_reg_targets.get_shape())
reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size * [0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
    Positive (matched) anchors are associated with the weight of their matched
    groundtruth box (1.0 unless groundtruth_weights says otherwise) and negative
    (unmatched) anchors are associated with a weight of negative_class_weight.
    When anchors are ignored, weights are set to zero. By default, both
    positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
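# Hedged usage sketch (the similarity / matcher / box-coder constructors below are
# the usual object_detection components, assumed here for illustration only):
#
#   assigner = TargetAssigner(similarity_calc=IouSimilarity(),
#                             matcher=ArgMaxMatcher(matched_threshold=0.5),
#                             box_coder=FasterRcnnBoxCoder())
#   cls_targets, cls_weights, reg_targets, reg_weights, match = assigner.assign(
#       anchors_boxlist, groundtruth_boxlist, groundtruth_labels)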
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm | text_norm | symbols | # Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc | csrc | nms | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#pragma once
#include "cpu/vision.h"
#ifdef WITH_CUDA
#include "cuda/vision.h"
#endif
at::Tensor nms(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
if (dets.is_cuda()) {
#ifdef WITH_CUDA
// TODO raise error if not compiled with CUDA
if (dets.numel() == 0)
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
auto b = at::cat({dets, scores.unsqueeze(1)}, 1);
return nms_cuda(b, threshold);
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
at::Tensor result = nms_cpu(dets, scores, threshold);
return result;
}
|
TensorFlow/Detection/SSD/models/research/object_detection/data_decoders | data_decoders | tf_example_decoder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
slim_example_decoder = tf.contrib.slim.tfexample_decoder
class TfExampleDecoderTest(tf.test.TestCase):
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
elif encoding_type == 'png':
image_encoded = tf.image.encode_png(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
elif encoding_type == 'png':
image_decoded = tf.image.decode_png(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def testDecodeAdditionalChannels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
additional_channel_tensor = np.random.randint(
256, size=(4, 5, 1)).astype(np.uint8)
encoded_additional_channel = self._EncodeImage(additional_channel_tensor)
decoded_additional_channel = self._DecodeImage(encoded_additional_channel)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/additional_channels/encoded':
dataset_util.bytes_list_feature(
[encoded_additional_channel] * 2),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/source_id':
dataset_util.bytes_feature('image_id'),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=2)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
np.concatenate([decoded_additional_channel] * 2, axis=2),
tensor_dict[fields.InputDataFields.image_additional_channels])
def testDecodeJpegImage(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'),
'image/source_id': dataset_util.bytes_feature('image_id'),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.image].
get_shape().as_list()), [None, None, 3])
self.assertAllEqual((tensor_dict[fields.InputDataFields.
original_image_spatial_shape].
get_shape().as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual('image_id', tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/key/sha256': dataset_util.bytes_feature('abc'),
'image/filename': dataset_util.bytes_feature('filename')
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertEqual('abc', tensor_dict[fields.InputDataFields.key])
self.assertEqual('filename', tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png = self._EncodeImage(image_tensor, encoding_type='png')
decoded_png = self._DecodeImage(encoded_png, encoding_type='png')
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_png),
'image/format': dataset_util.bytes_feature('png'),
'image/source_id': dataset_util.bytes_feature('image_id')
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.image].
get_shape().as_list()), [None, None, 3])
self.assertAllEqual((tensor_dict[fields.InputDataFields.
original_image_spatial_shape].
get_shape().as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual('image_id', tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1 = self._EncodeImage(mask_1, encoding_type='png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2 = self._EncodeImage(mask_2, encoding_type='png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True, instance_mask_type=input_reader_pb2.PNG_MASKS)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
encoded_masks = []
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks),
'image/height':
dataset_util.int64_feature(10),
'image/width':
dataset_util.int64_feature(10),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True, instance_mask_type=input_reader_pb2.PNG_MASKS)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_boxes]
.get_shape().as_list()), [None, 4])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
@test_util.enable_c_shapes
def testDecodeKeypoint(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_boxes]
.get_shape().as_list()), [None, 4])
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.groundtruth_keypoints].get_shape()
.as_list()), [2, 3, 2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = (
np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2)))
self.assertAllEqual(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_boxes]
.get_shape().as_list()), [None, 4])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
@test_util.enable_c_shapes
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes = [0, 1]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes = [1, 2]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
init = tf.tables_initializer()
with self.test_session() as sess:
sess.run(init)
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes_text = ['cat', 'cheetah']
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes_text = ['cat', 'dog']
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
bbox_classes_text = ['cat', 'dog']
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [None])
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
@test_util.enable_c_shapes
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_area = [100., 174.]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/area':
dataset_util.float_list_feature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_area]
.get_shape().as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
@test_util.enable_c_shapes
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_is_crowd = [0, 1]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/is_crowd':
dataset_util.int64_list_feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.groundtruth_is_crowd].get_shape()
.as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
[bool(item) for item in object_is_crowd],
tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
@test_util.enable_c_shapes
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_difficult = [0, 1]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/difficult':
dataset_util.int64_list_feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.groundtruth_difficult].get_shape()
.as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
[bool(item) for item in object_difficult],
tensor_dict[fields.InputDataFields.groundtruth_difficult])
@test_util.enable_c_shapes
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_group_of = [0, 1]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/group_of':
dataset_util.int64_list_feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.groundtruth_group_of].get_shape()
.as_list()), [2])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
object_weights = [0.75, 1.0]
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/object/weight':
dataset_util.float_list_feature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_weights]
.get_shape().as_list()), [None])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
@test_util.enable_c_shapes
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
.get_shape().as_list()), [4, 5, 3])
self.assertAllEqual((tensor_dict[fields.InputDataFields.groundtruth_classes]
.get_shape().as_list()), [4])
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
self.assertTrue(
fields.InputDataFields.groundtruth_instance_masks not in tensor_dict)
def testDecodeImageLabels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature('jpeg'),
'image/class/label': dataset_util.int64_list_feature([1, 2]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
tensor_dict = sess.run(tensor_dict)
self.assertTrue(
fields.InputDataFields.groundtruth_image_classes in tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 2]))
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature('jpeg'),
'image/class/text':
dataset_util.bytes_list_feature(['dog', 'cat']),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
with self.test_session() as sess:
sess.run(tf.tables_initializer())
tensor_dict = sess.run(tensor_dict)
self.assertTrue(
fields.InputDataFields.groundtruth_image_classes in tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 3]))
if __name__ == '__main__':
tf.test.main()
|
CUDA-Optimized/FastSpeech/fastspeech/trainer | trainer | fastspeech_trainer | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from fastspeech.align_tacotron2 import get_tacotron2, get_duration
from fastspeech.trainer.trainer import Trainer
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
from torch.nn import functional as F
class FastspeechTrainer(Trainer):
def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None,
n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp='O0', nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None, pre_aligns=True):
super(FastspeechTrainer, self).__init__(data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn, step, ckpt_path,
log_path, n_epochs, save_steps, log_steps, device, use_amp, nvprof_iter_start, nvprof_iter_end, pyprof_enabled, detect_anomaly, seed)
self.pre_aligns = pre_aligns
if not pre_aligns:
self.tacotron2 = get_tacotron2(device, is_training=True)
to_device_async(self.tacotron2, device)
def loss(self, inputs, model):
text = inputs["text_encoded"]
text_pos = inputs["text_pos"]
mel_tgt = inputs["mel"]
text = to_device_async(text, self.device)
text_pos = to_device_async(text_pos, self.device)
mel_tgt = to_device_async(mel_tgt, self.device)
if self.pre_aligns:
dur_tgt = inputs["align"] # preprocessed align
dur_tgt = dur_tgt.float()
dur_tgt = to_device_async(dur_tgt, self.device)
else:
text_len = inputs['text_len']
mel_len = inputs['mel_len']
dur_tgt = get_duration(
text, text_len, mel_tgt, mel_len, self.tacotron2, self.device)
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
# Forward
mel, mask, dur = model(
text,
text_pos,
duration_target=dur_tgt,
seq_output_len=mel_tgt.size(1))
assert(mel.size(1) == mel_tgt.size(1))
# Loss
mel_loss = F.mse_loss(mel, mel_tgt, reduction='none')
mel_mask = mel_tgt.ne(0).float()
mel_loss *= mel_mask
mel_loss = mel_loss.mean()
dur_tgt = torch.log(dur_tgt + 1)
dur_mask = text_pos.ne(0).float()
dur_tgt *= dur_mask
dur_pred_loss = F.mse_loss(dur, dur_tgt)
loss = mel_loss + dur_pred_loss
meta = {
'mel_loss': to_cpu_numpy(mel_loss),
'duration_predictor_loss': to_cpu_numpy(dur_pred_loss),
}
# meta = {}
return loss, meta
|
TensorFlow/Detection/SSD/models/research/slim | slim | setup | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup script for slim."""
from setuptools import find_packages
from setuptools import setup
setup(
name='slim',
version='0.1',
include_package_data=True,
packages=find_packages(),
description='tf-slim',
)
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/doc | doc | DLRM | # DLRM for TensorFlow 2
This document provides detailed instructions on running DLRM training as well as benchmark results for this model.
## Table Of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Quick Start Guide](#quick-start-guide)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Training process](#training-process)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 32GB)](#training-accuracy-nvidia-dgx-1-8x-v100-32gb)
* [Training accuracy: NVIDIA DGX-2 (16x V100 32GB)](#training-accuracy-nvidia-dgx-2-16x-v100-32gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: comparison with CPU for the "extra large" model](#training-performance-comparison-with-cpu-for-the-extra-large-model)
* [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32gb)
* [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (8x A100 80GB)](#inference-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Inference performance: NVIDIA DGX1V-32GB (8x V100 32GB)](#inference-performance-nvidia-dgx1v-32gb-8x-v100-32gb)
* [Inference performance: NVIDIA DGX2 (16x V100 16GB)](#inference-performance-nvidia-dgx2-16x-v100-16gb)
## Model overview
The Deep Learning Recommendation Model (DLRM) is a recommendation model designed to make use of both categorical and numerical inputs.
It was first described in [Deep Learning Recommendation Model for Personalization and Recommendation Systems](https://arxiv.org/abs/1906.00091).
This repository provides a reimplementation of the code base provided originally [here](https://github.com/facebookresearch/dlrm).
The scripts enable you to train DLRM on a synthetic dataset or on the [Criteo Terabyte Dataset](https://labs.criteo.com/2013/12/download-terabyte-click-logs/).
For the Criteo 1TB Dataset, we use a slightly different preprocessing procedure than the one found in the original implementation.
Most importantly, we use a technique called frequency thresholding to demonstrate models of different sizes.
The smallest model can be trained on a single V100-32GB GPU, while the largest one needs 8xA100-80GB GPUs.
The table below summarizes the model sizes and frequency thresholds used in this repository, for both the synthetic and real datasets supported.
| Dataset | Frequency Threshold | Final dataset size | Intermediate preprocessing storage required | Suitable for accuracy tests | Total download & preprocess time |GPU Memory required for training | Total embedding size | Number of model parameters |
|:-------|:-------|:-------|:-------------|:-------------------|:-------------------|:-------------------|:-------------------|:-------------------|
| Synthetic T15 |15 | 6 GiB | None | No | ~Minutes | 15.6 GiB | 15.6 GiB | 4.2B |
| Synthetic T3 |3 | 6 GiB | None | No | ~Minutes | 84.9 GiB | 84.9 GiB | 22.8B |
| Synthetic T0 |0 | 6 GiB | None | No | ~Minutes | 421 GiB | 421 GiB | 113B |
| Real Criteo T15 |15 | 370 GiB | ~Terabytes | Yes | ~Hours | 15.6 GiB | 15.6 GiB | 4.2B |
| Real Criteo T3 |3 | 370 GiB | ~Terabytes | Yes | ~Hours | 84.9 GiB | 84.9 GiB | 22.8B |
| Real Criteo T0 |0 | 370 GiB | ~Terabytes | Yes | ~Hours | 421 GiB | 421 GiB | 113B |
You can find a detailed description of the Criteo dataset preprocessing in the [preprocessing documentation](./criteo_dataset.md#advanced).
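For intuition, frequency thresholding simply remaps every categorical value that occurs fewer than the chosen threshold number of times onto a single shared "rare" index, which is what shrinks the embedding tables in the table above. The snippet below is only an illustrative sketch: the helper name and the numbers are made up, and it is not the preprocessing code used in this repository.
```python
import numpy as np

def apply_frequency_threshold(categorical_column, threshold):
    """Remap values seen fewer than `threshold` times to a shared index 0."""
    values, counts = np.unique(categorical_column, return_counts=True)
    frequent = values[counts >= threshold]
    remap = {v: i + 1 for i, v in enumerate(frequent)}  # index 0 is the "rare" bucket
    return np.array([remap.get(v, 0) for v in categorical_column])

col = np.array([7, 7, 7, 42, 42, 99])
print(apply_frequency_threshold(col, threshold=2))  # [1 1 1 2 2 0]; 99 collapses into the rare bucket
```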
### Model architecture
DLRM accepts two types of features: categorical and numerical. For each categorical feature,
an embedding table is used to provide a dense representation of each unique value.
The dense features enter the model and are transformed by a simple neural network referred to as "Bottom MLP."
This part of the network consists of a series
of linear layers with ReLU activations. The output of the bottom MLP and the embedding vectors are then fed into the
"dot interaction" operation. The output of "dot interaction" is then concatenated
with the features resulting from the bottom MLP and fed
into the "top MLP," which is a series of dense layers with activations.
The model outputs a single number which can be interpreted as a likelihood of a certain user clicking an ad.
<p align="center">
<img width="100%" src="./img/dlrm_singlegpu_architecture.svg" />
<br>
Figure 1. The architecture of DLRM.
</p>
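For concreteness, the forward pass described above can be written down in a few lines of TensorFlow. The snippet below is only a minimal sketch: the layer sizes, cardinalities, and feature counts are made-up placeholders rather than the configuration used in this repository, and the interaction term uses the full pairwise dot-product matrix for brevity (the actual model keeps only the unique off-diagonal pairs).
```python
import tensorflow as tf

NUM_NUMERICAL = 13
CARDINALITIES = [1000, 5000, 300]  # hypothetical sizes, one per categorical feature
EMBEDDING_DIM = 128

bottom_mlp = tf.keras.Sequential([
    tf.keras.layers.Dense(512, activation="relu"),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(EMBEDDING_DIM, activation="relu"),
])
embeddings = [tf.keras.layers.Embedding(c, EMBEDDING_DIM) for c in CARDINALITIES]
top_mlp = tf.keras.Sequential([
    tf.keras.layers.Dense(1024, activation="relu"),
    tf.keras.layers.Dense(1024, activation="relu"),
    tf.keras.layers.Dense(1),  # single logit: likelihood of a click
])

def dlrm_forward(numerical, categorical):
    dense_out = bottom_mlp(numerical)                                 # [B, D]
    emb_out = [emb(categorical[:, i]) for i, emb in enumerate(embeddings)]
    features = tf.stack([dense_out] + emb_out, axis=1)                # [B, F, D]
    # "Dot interaction": pairwise dot products between all feature vectors.
    interactions = tf.matmul(features, features, transpose_b=True)    # [B, F, F]
    num_features = len(CARDINALITIES) + 1
    flat = tf.reshape(interactions, [-1, num_features * num_features])
    # Concatenate the bottom-MLP output with the interactions and feed the top MLP.
    return top_mlp(tf.concat([dense_out, flat], axis=1))              # [B, 1]

numerical = tf.random.uniform([32, NUM_NUMERICAL])
categorical = tf.stack(
    [tf.random.uniform([32], maxval=c, dtype=tf.int32) for c in CARDINALITIES], axis=1)
logits = dlrm_forward(numerical, categorical)
```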
## Quick Start Guide
To train DLRM perform the following steps.
For the specifics concerning training and inference,
refer to the [Advanced](../README.md#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow2/Recommendation/DLRM_and_DCNv2
```
2. Build and run a DLRM Docker container.
```bash
docker build -t train_docker_image .
docker run --cap-add SYS_NICE --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data train_docker_image bash
```
3. Generate a synthetic dataset.
Downloading and preprocessing the Criteo 1TB dataset requires a lot of time and disk space.
Because of this we provide a synthetic dataset generator that roughly matches Criteo 1TB characteristics.
This will enable you to benchmark quickly.
If you prefer to benchmark on the real data, please follow [these instructions](./criteo_dataset.md#quick-start-guide)
to download and preprocess the dataset.
```bash
python -m dataloading.generate_feature_spec --variant criteo_t15_synthetic --dst feature_spec.yaml
python -m dataloading.transcribe --src_dataset_type synthetic --src_dataset_path . \
--dst_dataset_path /data/preprocessed --max_batches_train 1000 --max_batches_test 100 --dst_dataset_type tf_raw
```
4. Verify the input data:
After running `tree /data/preprocessed` you should see the following directory structure:
```bash
$ tree /data/preprocessed
/data/preprocessed
├── feature_spec.yaml
├── test
│   ├── cat_0.bin
│   ├── cat_1.bin
│   ├── ...
│   ├── label.bin
│   └── numerical.bin
└── train
    ├── cat_0.bin
    ├── cat_1.bin
    ├── ...
    ├── label.bin
    └── numerical.bin
2 directories, 57 files
```
5. Start training.
- single-GPU:
```bash
horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dlrm.py --dataset_path /data/preprocessed --amp --xla --save_checkpoint_path /data/checkpoint/
```
- multi-GPU:
```bash
horovodrun -np 8 -H localhost:8 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dlrm.py --dataset_path /data/preprocessed --amp --xla --save_checkpoint_path /data/checkpoint/
```
6. Start evaluation.
To evaluate a previously trained checkpoint, append `--restore_checkpoint_path <path> --mode eval` to the command used for training. For example, to test a checkpoint trained on 8xA100 80GB, run:
```bash
horovodrun -np 8 -H localhost:8 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dlrm.py --dataset_path /data/preprocessed --amp --xla --restore_checkpoint_path /data/checkpoint --mode eval
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA's latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, follow the instructions
in the [Quick Start Guide](#quick-start-guide). You can also add the `--max_steps 1000` flag
if you want to get a reliable throughput measurement without running the entire training.
You can also use synthetic data by running with the `--dataset_type synthetic` option if you haven't downloaded the dataset yet.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size, run:
```
horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dlrm.py --dataset_path /data/preprocessed/ --amp --restore_checkpoint_path <checkpoint_path> --mode inference
```
### Training process
The main training script resides in `dlrm.py`. The training speed is measured by throughput, i.e.,
the number of samples processed per second.
We use mixed precision training with static loss scaling for the bottom and top MLPs
while embedding tables are stored in FP32 format.
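A minimal sketch of this setup in plain TensorFlow 2 is shown below. It is not the implementation used in `dlrm.py`; the model, layer sizes, and hyperparameters are placeholders, and only the mechanics are illustrated: a global `mixed_float16` policy for the MLP layers, an embedding table pinned to FP32 via the layer `dtype`, and static loss scaling through `LossScaleOptimizer` with a fixed scale.
```python
import tensorflow as tf

# Compute-heavy layers run in float16; the policy keeps their variables in float32.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

model = tf.keras.Sequential([
    # The embedding table is pinned to full precision by overriding the layer dtype.
    tf.keras.layers.Embedding(10_000, 16, dtype="float32"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),
    # Keep the final output in float32 for a numerically stable loss.
    tf.keras.layers.Dense(1, dtype="float32"),
])

# Static loss scaling: a fixed scale instead of the default dynamic schedule.
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(learning_rate=0.01), dynamic=False, initial_scale=1024)

@tf.function
def train_step(categorical_ids, labels):
    with tf.GradientTape() as tape:
        logits = model(categorical_ids, training=True)
        loss = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(labels, logits, from_logits=True))
        scaled_loss = optimizer.get_scaled_loss(loss)
    scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
    grads = optimizer.get_unscaled_gradients(scaled_grads)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

ids = tf.random.uniform([32, 4], maxval=10_000, dtype=tf.int32)
labels = tf.cast(tf.random.uniform([32, 1], maxval=2, dtype=tf.int32), tf.float32)
print(float(train_step(ids, labels)))
```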
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
We used three model size variants to show memory scalability in a multi-GPU setup
(4.2B params, 22.8B params, and 113B params). Refer to the [Model overview](#model-overview) section for detailed
information about the model variants.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running training scripts as described in the Quick Start Guide in the DLRM Docker container.
| GPUs | Model size | Batch size / GPU | Accuracy (AUC) - TF32 | Accuracy (AUC) - mixed precision | Time to train - TF32 [minutes] | Time to train - mixed precision [minutes] | Time to train speedup (TF32 to mixed precision) |
|:-------|:-------------|:-------------------|:------------------------|:-----------------------------------|:---------------------------------|:--------------------------------------------|:--------------------------------------------------|
| 1 | small | 64k | 0.8025 | 0.8025 | 26.75 | 16.27 | 1.64 |
| 8 | large | 8k | 0.8027 | 0.8026 | 8.77 | 6.57 | 1.33 |
| 8 | extra large | 8k | 0.8026 | 0.8026 | 10.47 | 9.08 | 1.15 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 32GB)
Our results were obtained by running training scripts as described in the Quick Start Guide in the DLRM Docker container.
| GPUs | Model size | Batch size / GPU | Accuracy (AUC) - FP32 | Accuracy (AUC) - mixed precision | Time to train - FP32 [minutes] | Time to train - mixed precision [minutes] | Time to train speedup (FP32 to mixed precision) |
|:-------|:-------------|:-------------------|:------------------------|:-----------------------------------|:---------------------------------|:--------------------------------------------|:--------------------------------------------------|
| 1 | small | 64k | 0.8027 | 0.8025 | 109.63 | 34.83 | 3.15 |
| 8 | large | 8k | 0.8028 | 0.8026 | 26.01 | 13.73 | 1.89 |
##### Training accuracy: NVIDIA DGX-2 (16x V100 32GB)
Our results were obtained by running training scripts as described in the Quick Start Guide in the DLRM Docker container.
| GPUs | Model size | Batch size / GPU | Accuracy (AUC) - FP32 | Accuracy (AUC) - mixed precision | Time to train - FP32 [minutes] | Time to train - mixed precision [minutes] | Time to train speedup (FP32 to mixed precision) |
|:-------|:-------------|:-------------------|:------------------------|:-----------------------------------|:---------------------------------|:--------------------------------------------|:--------------------------------------------------|
| 1 | small | 64k | 0.8026 | 0.8026 | 105.13 | 33.37 | 3.15 |
| 8 | large | 8k | 0.8027 | 0.8027 | 21.21 | 11.43 | 1.86 |
| 16 | large | 4k | 0.8025 | 0.8026 | 15.52 | 10.88 | 1.43 |
##### Training stability test
The histograms below show the distribution of ROC AUC results achieved at the end of training for each precision and hardware platform tested. No statistically significant differences were observed across precisions, GPU counts, or hardware platforms. Using the larger dataset has a modest, positive impact on the final AUC score.
<p align="center">
<img width="100%" src="img/dlrm_histograms.svg" />
<br>
Figure 4. Results of stability tests for DLRM.
</p>
#### Training performance results
We used throughput in items processed per second as the performance metric.
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by following the commands from the Quick Start Guide
in the DLRM Docker container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in items per second) were averaged over 1000 training steps.
| GPUs | Model size | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 to mixed precision) |
|:-------|:-------------|:-------------------|:--------------------|:-------------------------------|:-----------------------------------------------|
| 1 | small | 64k | 2.84M | 4.55M | 1.60 |
| 8 | large | 8k | 10.9M | 13.8M | 1.27 |
| 8 | extra large | 8k | 9.76M | 11.5M | 1.17 |
To achieve these results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: comparison with CPU for the "extra large" model
For the "extra large" model (113B parameters), we also obtained CPU results for comparison using the same source code
(using the `--cpu` command line flag for the CPU-only experiments).
We compare three hardware setups:
- CPU only,
- a single GPU that uses CPU memory for the largest embedding tables,
- Hybrid-Parallel using the full DGX A100-80GB
| Hardware | Throughput [samples / second]| Speedup over CPU|
|:---|:---|:---|
| 2xAMD EPYC 7742 | 17.7k | 1x |
| 1xA100-80GB + 2xAMD EPYC 7742 (large embeddings on CPU) | 768k | 43x |
| DGX A100 (8xA100-80GB) (hybrid parallel) | 11.5M | 649x |
##### Training performance: NVIDIA DGX-1 (8x V100 32GB)
| GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) |
|:-------|:-------------|:-------------------|:--------------------|:-------------------------------|:-----------------------------------------------|
| 1 | small | 64k | 0.663M | 2.23M | 3.37 |
| 8 | large | 8k | 3.13M | 6.31M | 2.02 |
To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-2 (16x V100 32GB)
| GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) |
|:-------|:-------------|:-------------------|:--------------------|:-------------------------------|:-----------------------------------------------|
| 1 | small | 64k | 0.698M | 2.44M | 3.49 |
| 8 | large | 8k | 3.79M | 7.82M | 2.06 |
| 16 | large | 4k | 6.43M | 10.5M | 1.64 |
To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (8x A100 80GB)
| GPUs | Model size | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Average latency - TF32 [ms] | Average latency - mixed precision [ms] | Throughput speedup (mixed precision to TF32) |
|-------:|:-------------|-------------------:|:--------------------|:-------------------------------|------------------------------:|-----------------------------------------:|-----------------------------------------------:|
| 1 | small | 2048 | 1.38M | 1.48M | 1.49 | 1.38 | 1.07 |
##### Inference performance: NVIDIA DGX1V-32GB (8x V100 32GB)
| GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Average latency - FP32 [ms] | Average latency - mixed precision [ms] | Throughput speedup (mixed precision to FP32) |
|-------:|:-------------|-------------------:|:--------------------|:-------------------------------|------------------------------:|-----------------------------------------:|-----------------------------------------------:|
| 1 | small | 2048 | 0.871M | 0.951M | 2.35 | 2.15 | 1.09 |
##### Inference performance: NVIDIA DGX2 (16x V100 16GB)
| GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Average latency - FP32 [ms] | Average latency - mixed precision [ms] | Throughput speedup (mixed precision to FP32) |
|-------:|:-------------|-------------------:|:--------------------|:-------------------------------|------------------------------:|-----------------------------------------:|-----------------------------------------------:|
| 1 | small | 2048 | 1.15M | 1.37M | 1.78 | 1.50 | 1.19 |
|
PyTorch/Recommendation/NCF | NCF | prepare_dataset | # Copyright (c) 2018, deepakn94, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e
set -x
DATASET_NAME=${1:-'ml-20m'}
RAW_DATADIR=${2:-"/data/${DATASET_NAME}"}
CACHED_DATADIR=${3:-"/data/cache/${DATASET_NAME}"}
# you can add another option to this case in order to support other datasets
case ${DATASET_NAME} in
'ml-20m')
ZIP_PATH=${RAW_DATADIR}/'ml-20m.zip'
SHOULD_UNZIP=1
RATINGS_PATH=${RAW_DATADIR}'/ml-20m/ratings.csv'
;;
'ml-1m')
ZIP_PATH=${RAW_DATADIR}/'ml-1m.zip'
SHOULD_UNZIP=1
RATINGS_PATH=${RAW_DATADIR}'/ml-1m/ratings.dat'
;;
*)
echo "Using unknown dataset: $DATASET_NAME."
RATINGS_PATH=${RAW_DATADIR}'/ratings.csv'
echo "Expecting file at ${RATINGS_PATH}"
SHOULD_UNZIP=0
esac
if [ ! -d ${RAW_DATADIR} ]; then
mkdir -p ${RAW_DATADIR}
fi
if [ ! -d ${CACHED_DATADIR} ]; then
mkdir -p ${CACHED_DATADIR}
fi
if [ -f log ]; then
rm -f log
fi
if [ ! -f ${RATINGS_PATH} ]; then
if [ $SHOULD_UNZIP == 1 ]; then
if [ ! -f ${ZIP_PATH} ]; then
echo "Dataset not found. Please download it from: https://grouplens.org/datasets/movielens/20m/ and put it in ${ZIP_PATH}"
exit 1
fi
unzip -u ${ZIP_PATH} -d ${RAW_DATADIR}
else
echo "File not found at ${RATINGS_PATH}. Aborting."
exit 1
fi
fi
if [ ! -f ${CACHED_DATADIR}/feature_spec.yaml ]; then
echo "preprocessing ${RATINGS_PATH} and save to disk"
t0=$(date +%s)
python convert.py --path ${RATINGS_PATH} --output ${CACHED_DATADIR}
t1=$(date +%s)
delta=$(( $t1 - $t0 ))
echo "Finish preprocessing in $delta seconds"
else
echo 'Using cached preprocessed data'
fi
echo "Dataset $DATASET_NAME successfully prepared at: $CACHED_DATADIR"
echo "You can now run the training with: python -m torch.distributed.launch --nproc_per_node=<number_of_GPUs> --use_env ncf.py --data ${CACHED_DATADIR}"
|
TensorFlow2/Recommendation/SIM/sim/utils | utils | losses | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def build_sim_loss_fn(alpha=1.0, beta=1.0):
cross_entropy_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def sim_loss_fn(targets, gsu_logits, esu_logits):
gsu_loss = cross_entropy_loss(targets, gsu_logits)
esu_loss = cross_entropy_loss(targets, esu_logits)
return 0.5 * (alpha * gsu_loss + beta * esu_loss)
return sim_loss_fn
@tf.function
def dien_auxiliary_loss_fn(click_probs, noclick_probs, mask=None):
if mask is None:
mask = tf.ones_like(click_probs)
click_loss_term = -tf.math.log(click_probs) * mask
noclick_loss_term = -tf.math.log(1.0 - noclick_probs) * mask
return tf.reduce_mean(click_loss_term + noclick_loss_term)
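# --- Usage sketch (added for illustration; not part of the original module) ---
# The tensors below are made-up placeholders. In the real training loop the
# targets and the GSU/ESU logits come from the SIM model's two heads.
if __name__ == "__main__":
    loss_fn = build_sim_loss_fn(alpha=1.0, beta=1.0)
    targets = tf.constant([[1.0], [0.0], [1.0]])
    gsu_logits = tf.constant([[2.1], [-1.3], [0.4]])
    esu_logits = tf.constant([[1.8], [-0.9], [0.7]])
    print("combined SIM loss:", float(loss_fn(targets, gsu_logits, esu_logits)))
    click_probs = tf.constant([[0.9], [0.2]])
    noclick_probs = tf.constant([[0.1], [0.3]])
    print("DIEN auxiliary loss:",
          float(dien_auxiliary_loss_fn(click_probs, noclick_probs)))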
|