relative_path | section | filename | text
---|---|---|---|
PyTorch/LanguageModeling/Transformer-XL/pytorch | pytorch | run_text8_large | #!/bin/bash
export OMP_NUM_THREADS=1
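# Illustrative usage (not part of the original script); any extra arguments after
# 'train' or 'eval' are forwarded to train.py / eval.py via ${@:2}:
#   bash run_text8_large.sh train
#   bash run_text8_large.sh eval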
if [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--cuda \
--data ../data/text8/ \
--dataset text8 \
--n_layer 24 \
--d_model 1024 \
--n_head 8 \
--d_head 128 \
--d_inner 3072 \
--dropout 0.15 \
--dropatt 0.15 \
--optim adam \
--lr 0.00025 \
--tgt_len 768 \
--mem_len 768 \
--eval_tgt_len 128 \
--batch_size 64 \
--max_step 400000 \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python eval.py \
--cuda \
--data ../data/text8/ \
--dataset text8 \
--tgt_len 128 \
--mem_len 3800 \
--clamp_len 1000 \
--same_length \
--split test \
${@:2}
else
echo 'unknown argument 1'
fi
|
PyTorch/Recommendation/DLRM/dlrm/data | data | utils | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Tuple, Optional, List
import numpy as np
import torch
from torch import Tensor
from torch.cuda import Stream
from torch.utils.data import Dataset, DataLoader
import tqdm
from dlrm.data.defaults import TRAIN_MAPPING, TEST_MAPPING, DTYPE_SELECTOR
from dlrm.data.feature_spec import FeatureSpec
def collate_split_tensors(
tensors: Tuple[Tensor, Tensor, Tensor],
device: str,
orig_stream: Stream,
numerical_type: torch.dtype = torch.float32
):
tensors = [tensor.to(device, non_blocking=True) if tensor is not None else None for tensor in
tensors]
if device == 'cuda':
for tensor in tensors:
if tensor is not None:
tensor.record_stream(orig_stream)
numerical_features, categorical_features, click = tensors
if numerical_features is not None:
numerical_features = numerical_features.to(numerical_type)
return numerical_features, categorical_features, click
def collate_array(
array: np.array,
device: str,
orig_stream: Stream,
num_numerical_features: int,
selected_categorical_features: Optional[Tensor] = None
):
# numerical features are encoded as float32
numerical_features = array[:, 1:1 + num_numerical_features].view(dtype=np.float32)
numerical_features = torch.from_numpy(numerical_features)
categorical_features = torch.from_numpy(array[:, 1 + num_numerical_features:])
click = torch.from_numpy(array[:, 0])
categorical_features = categorical_features.to(device, non_blocking=True).to(torch.long)
numerical_features = numerical_features.to(device, non_blocking=True)
click = click.to(torch.float32).to(device, non_blocking=True)
if selected_categorical_features is not None:
categorical_features = categorical_features[:, selected_categorical_features]
if device == 'cuda':
numerical_features.record_stream(orig_stream)
categorical_features.record_stream(orig_stream)
click.record_stream(orig_stream)
return numerical_features, categorical_features, click
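# Illustrative sketch (not part of the original file): collate_array expects each
# row of a single int32 array to hold [label, numerical..., categorical...]; the
# numerical slice is bit-cast to float32 via .view(dtype=np.float32).
#
#   batch = np.zeros((64, 1 + 13 + 26), dtype=np.int32)   # hypothetical Criteo-like layout
#   num, cat, click = collate_array(batch, device='cpu', orig_stream=None,
#                                   num_numerical_features=13)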
def write_dataset_to_disk(dataset_train: Dataset, dataset_test: Dataset, feature_spec: FeatureSpec,
saving_batch_size=512) -> None:
feature_spec.check_feature_spec() # We rely on the feature spec being properly formatted
categorical_features_list = feature_spec.get_categorical_feature_names()
categorical_features_types = [feature_spec.feature_spec[feature_name][DTYPE_SELECTOR]
for feature_name in categorical_features_list]
number_of_numerical_features = feature_spec.get_number_of_numerical_features()
number_of_categorical_features = len(categorical_features_list)
for mapping_name, dataset in zip((TRAIN_MAPPING, TEST_MAPPING),
(dataset_train, dataset_test)):
file_streams = []
label_path, numerical_path, categorical_paths = feature_spec.get_mapping_paths(mapping_name)
try:
os.makedirs(os.path.dirname(numerical_path), exist_ok=True)
numerical_f = open(numerical_path, "wb+")
file_streams.append(numerical_f)
os.makedirs(os.path.dirname(label_path), exist_ok=True)
label_f = open(label_path, 'wb+')
file_streams.append(label_f)
categorical_fs = []
for feature_name in categorical_features_list:
local_path = categorical_paths[feature_name]
os.makedirs(os.path.dirname(local_path), exist_ok=True)
fs = open(local_path, 'wb+')
categorical_fs.append(fs)
file_streams.append(fs)
for numerical, categorical, label in tqdm.tqdm(
DataLoader(dataset, saving_batch_size),
desc=mapping_name + " dataset saving",
unit_scale=saving_batch_size
):
assert (numerical.shape[-1] == number_of_numerical_features)
assert (categorical.shape[-1] == number_of_categorical_features)
numerical_f.write(numerical.to(torch.float16).cpu().numpy().tobytes())
label_f.write(label.to(torch.bool).cpu().numpy().tobytes())
for cat_idx, cat_feature_type in enumerate(categorical_features_types):
categorical_fs[cat_idx].write(
categorical[:, :, cat_idx].cpu().numpy().astype(cat_feature_type).tobytes())
finally:
for stream in file_streams:
stream.close()
feature_spec.to_yaml()
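# Illustrative note (not part of the original file): after this function runs, each
# mapping (train/test) is stored as flat binary files - numerical features as
# float16, labels as bool, and one file per categorical feature written in the
# dtype declared for it in the FeatureSpec.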
def prefetcher(load_iterator, prefetch_stream):
def _prefetch():
with torch.cuda.stream(prefetch_stream):
try:
data_batch = next(load_iterator)
except StopIteration:
return None
return data_batch
next_data_batch = _prefetch()
while next_data_batch is not None:
torch.cuda.current_stream().wait_stream(prefetch_stream)
data_batch = next_data_batch
next_data_batch = _prefetch()
yield data_batch
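# Illustrative usage sketch (assumptions, not part of the original file): wrap an
# existing loader iterator so the next batch is copied on a side CUDA stream
# while the current batch is being consumed.
#
#   stream = torch.cuda.Stream()
#   for numerical, categorical, click in prefetcher(iter(data_loader), stream):
#       ...  # training step; data_loader is a hypothetical batch iterator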
def get_embedding_sizes(fspec: FeatureSpec, max_table_size: Optional[int]) -> List[int]:
if max_table_size is not None:
return [min(s, max_table_size) for s in fspec.get_categorical_sizes()]
else:
return fspec.get_categorical_sizes()
|
PyTorch/SpeechRecognition/wav2vec2/wav2vec2 | wav2vec2 | criterion | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import editdistance
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from common.fairseq import utils
from common.fairseq.data.data_utils import post_process
from common.utils import AttrDict
class Wav2vecCriterion(_Loss):
def __init__(self, args):
super().__init__(args)
self.infonce = args.infonce
self.loss_weights = args.loss_weights
self.log_keys = [] if args.log_keys is None else args.log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"],
sub_batch_sizes=sample["sub_batch_sizes"],
sub_batch_lens=sample["sub_batch_lens"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
reduction = "sum" if reduce else "none"
if self.infonce:
loss = F.cross_entropy(logits, target, reduction=reduction)
else:
loss = F.binary_cross_entropy_with_logits(
logits, target.float(), weights, reduction=reduction
)
if 'sample_size' in sample:
sample_size = sample['sample_size']
elif 'mask_indices' in sample['net_input']:
sample_size = sample['net_input']['mask_indices'].sum()
elif self.infonce:
sample_size = target.numel()
else:
sample_size = target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), \
f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
log_out = {
"loss": loss.item() if reduce else loss.detach(),
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
log_out["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
# If the targets have been mixed with the predictions of
# teacher models, find the original targets
if hasattr(model, "get_original_targets"):
original_target = model.get_original_targets(
sample, net_output)
else:
original_target = target
log_out["target"] = original_target.cpu().numpy()
elif lk in net_output:
log_out[lk] = float(net_output[lk])
if len(losses) > 1:
for i, l in enumerate(losses):
log_out[f"loss_{i}"] = l.item()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max_ = logits.argmax(-1) == 0
min_ = logits.argmin(-1) == 0
both = max_ & min_
corr = max_.long().sum().item() - both.long().sum().item()
count = float(max_.numel())
log_out["correct"] = corr
log_out["count"] = count
return loss, sample_size, log_out
class CTCCriterion(_Loss):
def __init__(self, target_dictionary, blank_idx=0, pad_idx=1, eos_idx=2,
zero_infinity=True, sentence_avg=True, post_process='letter'):
super().__init__()
# keep all indexes for compatibility with fairseq
self.blank_idx = blank_idx
self.pad_idx = target_dictionary.pad()
self.eos_idx = target_dictionary.eos()
assert self.blank_idx != self.pad_idx != self.eos_idx
self.target_dictionary = target_dictionary
self.zero_infinity = zero_infinity
self.sentence_avg = sentence_avg
self.post_process = post_process
# currently we don't support decoders (e.g., KenLM)
self.w2l_decoder = None
def forward(self, model, sample, reduce=True):
net_out = model(**sample["net_input"])
logp = model.get_normalized_probs(
net_out["encoder_out"], net_out["padding_mask"], log_probs=True
).contiguous()
T, B, _ = logp.size()
if net_out["padding_mask"] is not None:
lens = (~net_out["padding_mask"]).long().sum(-1)
else:
lens = logp.new_full((B,), T, dtype=torch.long)
tgt = sample["target"]
pad_mask = (tgt != self.pad_idx) & (tgt != self.eos_idx)
tgt_flat = tgt.masked_select(pad_mask)
tgt_lens = sample["target_lengths"]
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(logp, tgt_flat, lens, tgt_lens,
blank=self.blank_idx, reduction="sum",
zero_infinity=self.zero_infinity)
log_out = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["id"].numel(),
"sample_size": B if self.sentence_avg else sample["ntokens"]
}
if not model.training:
log_out.update(self.calculate_wer(sample, logp, lens))
return loss, log_out['sample_size'], log_out
def calculate_wer(self, sample, logp, lens):
with torch.no_grad():
log = AttrDict({"wv_errs": 0, "w_errs": 0, "w_len": 0,
"c_errs": 0, "c_len": 0})
logp_t = logp.transpose(0, 1).float().contiguous().cpu()
tgt_labels = sample.get('target_label', sample['target'])
head = lambda l: None if l is None or len(l) < 1 else l[0]
for lp, L, tgt in zip(logp_t, lens, tgt_labels):
lp = lp[:L].unsqueeze(0)
if self.w2l_decoder is not None:
decoded = head(head(self.w2l_decoder.decode(lp)))
else:
decoded = None
mask = (tgt != self.pad_idx) & (tgt != self.eos_idx)
tgt_units = self.target_dictionary.string(tgt[mask])
tgt_units_arr = tgt[mask].tolist()
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
log.c_errs += editdistance.eval(pred_units_arr, tgt_units_arr)
log.c_len += len(tgt_units_arr)
tgt_words = post_process(tgt_units, self.post_process).split()
pred_units = self.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units,
self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
log.w_errs += editdistance.eval(pred_words, tgt_words)
log.wv_errs += editdistance.eval(pred_words_raw, tgt_words)
else:
dist = editdistance.eval(pred_words_raw, tgt_words)
log.w_errs += dist
log.wv_errs += dist
log.w_len += len(tgt_words)
return vars(log)
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_AMP_4GPU | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training with AMP on 4 GPUs using a total batch size of 16 (4 per GPU)
# Usage: ./UNet_AMP_4GPU.sh <path to results directory> <path to dataset> <DAGM class ID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
mpirun \
-np 4 \
-H localhost:4 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=4 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
PyTorch/SpeechRecognition/Jasper/common/dali | dali | iterator | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import numpy as np
from common.helpers import print_once
from common.text import _clean_text, punctuation_map
def normalize_string(s, symbols, punct_map):
"""
Normalizes string.
Example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
"""
labels = set(symbols)
try:
text = _clean_text(s, ["english_cleaners"], punct_map).strip()
return ''.join([tok for tok in text if all(t in labels for t in tok)])
except Exception as e:
print_once("WARNING: Normalizing failed: {s} {e}")
class DaliJasperIterator(object):
"""
Returns batches of data for Jasper training:
preprocessed_signal, preprocessed_signal_length, transcript, transcript_length
This iterator is not meant to be the entry point to the DALI processing pipeline.
Use DataLoader instead.
"""
def __init__(self, dali_pipelines, transcripts, symbols, batch_size, reader_name, train_iterator: bool):
self.transcripts = transcripts
self.symbols = symbols
self.batch_size = batch_size
from nvidia.dali.plugin.pytorch import DALIGenericIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
self.dali_it = DALIGenericIterator(
dali_pipelines, ["audio", "label", "audio_shape"], reader_name=reader_name,
dynamic_shape=True, auto_reset=True,
last_batch_policy=(LastBatchPolicy.DROP if train_iterator else LastBatchPolicy.PARTIAL))
@staticmethod
def _str2list(s: str):
"""
Returns a list of floats that represents the given string.
'0.' denotes separator
'1.' denotes 'a'
'27.' denotes "'"
Assumes that the string is lower case.
"""
list = []
for c in s:
if c == "'":
list.append(27.)
else:
list.append(max(0., ord(c) - 96.))
return list
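# Illustrative example: _str2list("ab c") -> [1., 2., 0., 3.]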
@staticmethod
def _pad_lists(lists: list, pad_val=0):
"""
Pads lists so that all have the same size.
Returns a list with the actual sizes of the corresponding input lists.
"""
max_length = 0
sizes = []
for li in lists:
sizes.append(len(li))
max_length = max_length if len(li) < max_length else len(li)
for li in lists:
li += [pad_val] * (max_length - len(li))
return sizes
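# Illustrative example: _pad_lists([[1., 2.], [3.]]) pads the second list in place
# to [3., 0.] and returns the original sizes [2, 1].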
def _gen_transcripts(self, labels, normalize_transcripts: bool = True):
"""
Generate transcripts in format expected by NN
"""
lists = [
self._str2list(normalize_string(self.transcripts[lab.item()], self.symbols, punctuation_map(self.symbols)))
for lab in labels
] if normalize_transcripts else [self._str2list(self.transcripts[lab.item()]) for lab in labels]
sizes = self._pad_lists(lists)
return torch.tensor(lists).cuda(), torch.tensor(sizes, dtype=torch.int32).cuda()
def __next__(self):
data = self.dali_it.__next__()
transcripts, transcripts_lengths = self._gen_transcripts(data[0]["label"])
return data[0]["audio"], data[0]["audio_shape"][:, 1], transcripts, transcripts_lengths
def next(self):
return self.__next__()
def __iter__(self):
return self
# TODO: refactor
class SyntheticDataIterator(object):
def __init__(self, batch_size, nfeatures, feat_min=-5., feat_max=0., txt_min=0., txt_max=23., feat_lens_max=1760,
txt_lens_max=231, regenerate=False):
"""
Args:
batch_size
nfeatures: number of features for melfbanks
feat_min: minimum value in `feat` tensor, used for randomization
feat_max: maximum value in `feat` tensor, used for randomization
txt_min: minimum value in `txt` tensor, used for randomization
txt_max: maximum value in `txt` tensor, used for randomization
regenerate: If True, regenerate random tensors for every iterator step.
If False, generate them only at start.
"""
self.batch_size = batch_size
self.nfeatures = nfeatures
self.feat_min = feat_min
self.feat_max = feat_max
self.feat_lens_max = feat_lens_max
self.txt_min = txt_min
self.txt_max = txt_max
self.txt_lens_max = txt_lens_max
self.regenerate = regenerate
if not self.regenerate:
self.feat, self.feat_lens, self.txt, self.txt_lens = self._generate_sample()
def _generate_sample(self):
feat = (self.feat_max - self.feat_min) * np.random.random_sample(
(self.batch_size, self.nfeatures, self.feat_lens_max)) + self.feat_min
feat_lens = np.random.randint(0, int(self.feat_lens_max) - 1, size=self.batch_size)
txt = (self.txt_max - self.txt_min) * np.random.random_sample(
(self.batch_size, self.txt_lens_max)) + self.txt_min
txt_lens = np.random.randint(0, int(self.txt_lens_max) - 1, size=self.batch_size)
return torch.Tensor(feat).cuda(), \
torch.Tensor(feat_lens).cuda(), \
torch.Tensor(txt).cuda(), \
torch.Tensor(txt_lens).cuda()
def __next__(self):
if self.regenerate:
return self._generate_sample()
return self.feat, self.feat_lens, self.txt, self.txt_lens
def next(self):
return self.__next__()
def __iter__(self):
return self
|
PyTorch/SpeechSynthesis/HiFiGAN/platform | platform | DGX1_HiFi-GAN_AMP_8GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=8}
: ${BATCH_SIZE:=16}
: ${GRAD_ACCUMULATION:=1}
: ${AMP:=true}
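# The ': ${VAR:=default}' lines above only set defaults and 'set -a' exports them,
# so they can be overridden from the environment, e.g. (illustrative):
#   NUM_GPUS=4 GRAD_ACCUMULATION=2 bash platform/DGX1_HiFi-GAN_AMP_8GPU.sh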
bash scripts/train_lj22khz.sh "$@"
|
PyTorch/Classification/ConvNets/triton | triton | convert_model | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The `convert_model.py` script converts between model formats, with additional model optimizations
for faster inference.
It converts a model obtained from the results of the `get_model` function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- --large-model flag - helps load a model which exceeds the maximum protobuf size of 2GB
- --tf-allow-growth flag - controls the GPU memory growth limiting feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). It is disabled by default.
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
# if conversion is required, temporary change model load precision to that required by converter
# it is for TensorRT converters which require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
# dataloader must match the source model precision - so not recovering it yet
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
|
PyTorch/LanguageModeling/Transformer-XL | Transformer-XL | prep_text8 | #!/usr/bin/env python
# coding=utf-8
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import zipfile
from io import open
if os.path.exists('train.txt'):
print('Tokenized text8 already exists - skipping processing')
sys.exit()
zipfile.ZipFile('text8.zip').extractall()
data = open('text8', 'r', encoding='utf-8').read()
print('Length of text8: {}'.format(len(data)))
num_test_chars = 5000000
train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]
for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]:
print('{} will have {} bytes'.format(fn, len(part)))
print('- Tokenizing...')
# Change space ' ' to underscore '_'
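# e.g. 'hello world' -> 'h e l l o _ w o r l d'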
part_str = ' '.join(['_' if c == ' ' else c for c in part.strip()])
print('- Writing...')
open(fn, 'w').write(part_str)
open(fn + '.raw', 'w', encoding='utf-8').write(part)
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | training_perf | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAX_FP32_BS=${1:-128}
MAX_AMP_BS=${2:-256}
GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | uniq)
GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l)
function run_benchmark() {
BATCH_SIZE=$1
MODE_SIZE=$2
if [[ $4 -eq "1" ]]; then
XLA="--xla"
else
XLA=""
fi
case $2 in
"amp") MODE_FLAGS="--amp --static_loss_scale 128";;
"fp32"|"tf32") MODE_FLAGS="";;
*) echo "Unsupported configuration, use amp, tf32 or fp32";;
esac
CMD_LINE="--mode=training_benchmark --warmup_steps 200 --num_iter 500 --iter_unit batch --batch_size $BATCH_SIZE \
--data_dir=/data/tfrecords/ --results_dir=/tmp/result $MODE_FLAGS $XLA"
mkdir -p /tmp/result/
if [[ $3 -eq "1" ]]; then
python ./main.py ${CMD_LINE} > /tmp/result/logs.txt
else
mpiexec --allow-run-as-root --bind-to socket -np $3 python3 main.py ${CMD_LINE} > /tmp/result/logs.txt
fi
tail -n1 /tmp/result/logs.txt | sed \
's/^DLL \([0-9]*-\)*[0-9]* \([0-9]*:\)*[0-9]*.[0-9]* - ()/BS='$BATCH_SIZE','$2',XLA='$4'/' >> ./training_benchmark.txt
rm -rf /tmp/result
}
run_benchmark $MAX_AMP_BS amp 1 0
run_benchmark $MAX_AMP_BS amp 1 1
run_benchmark $MAX_FP32_BS fp32 1 0
run_benchmark $MAX_FP32_BS fp32 1 1
if [[ $GPU_COUNT -ne "1" ]]; then
run_benchmark $MAX_AMP_BS amp $GPU_COUNT 0
run_benchmark $MAX_AMP_BS amp $GPU_COUNT 1
run_benchmark $MAX_FP32_BS fp32 $GPU_COUNT 0
run_benchmark $MAX_FP32_BS fp32 $GPU_COUNT 1
fi
cat ./training_benchmark.txt |
TensorFlow2/Recommendation/WideAndDeep/tests/feature_specs | feature_specs | no_numerical | channel_spec:
label:
- clicked
map: []
multihot_categorical:
- topic_id_list
- entity_id_list
- category_id_list
numerical: []
onehot_categorical:
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
feature_spec:
ad_id:
cardinality: 250000
advertiser_id:
cardinality: 2500
campaign_id:
cardinality: 5000
category_id_list:
cardinality: 100
max_hotness: 3
clicked: {}
document_id:
cardinality: 300000
document_id_promo:
cardinality: 100000
entity_id_list:
cardinality: 10000
max_hotness: 3
geo_location:
cardinality: 2500
geo_location_country:
cardinality: 300
geo_location_state:
cardinality: 2000
platform:
cardinality: 4
publisher_id:
cardinality: 1000
publisher_id_promo:
cardinality: 1000
source_id:
cardinality: 4000
source_id_promo:
cardinality: 4000
topic_id_list:
cardinality: 350
max_hotness: 3
metadata: {}
source_spec:
test:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- topic_id_list
- entity_id_list
- category_id_list
files:
- valid.csv
type: csv
train:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- topic_id_list
- entity_id_list
- category_id_list
files:
- train.csv
type: csv
|
PyTorch/DrugDiscovery/MoFlow/scripts | scripts | benchmark_inference | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=${1:-512}
prec=${2:-amp}
flags="${@:3}"
cmd="python \
/workspace/moflow_pyt/moflow/runtime/generate.py \
--batch_size ${bs} \
--steps 200 \
--warmup_steps 10 \
--allow_untrained \
--predictions_path '' \
--jit \
${flags} \
"
if [ $prec == "amp" ]; then
cmd="${cmd} --amp"
fi
set -x
bash -c "${cmd}"
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime | runtime | training | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
import pathlib
from typing import List
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from apex.optimizers import FusedAdam, FusedLAMB
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from tqdm import tqdm
from se3_transformer.data_loading import QM9DataModule
from se3_transformer.model import SE3TransformerPooled
from se3_transformer.model.fiber import Fiber
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import QM9MetricCallback, QM9LRSchedulerCallback, BaseCallback, \
PerformanceCallback
from se3_transformer.runtime.inference import evaluate
from se3_transformer.runtime.loggers import LoggerCollection, DLLogger, WandbLogger, Logger
from se3_transformer.runtime.utils import to_cuda, get_local_rank, init_distributed, seed_everything, \
using_tensor_cores, increase_l2_fetch_granularity
def save_state(model: nn.Module, optimizer: Optimizer, epoch: int, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Saves model, optimizer and epoch states to path (only once per node) """
if get_local_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, DistributedDataParallel) else model.state_dict()
checkpoint = {
'state_dict': state_dict,
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}
for callback in callbacks:
callback.on_checkpoint_save(checkpoint)
torch.save(checkpoint, str(path))
logging.info(f'Saved checkpoint to {str(path)}')
def load_state(model: nn.Module, optimizer: Optimizer, path: pathlib.Path, callbacks: List[BaseCallback]):
""" Loads model, optimizer and epoch states from path """
checkpoint = torch.load(str(path), map_location={'cuda:0': f'cuda:{get_local_rank()}'})
if isinstance(model, DistributedDataParallel):
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for callback in callbacks:
callback.on_checkpoint_load(checkpoint)
logging.info(f'Loaded checkpoint from {str(path)}')
return checkpoint['epoch']
def train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks, args):
loss_acc = torch.zeros((1,), device='cuda')
for i, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), unit='batch',
desc=f'Epoch {epoch_idx}', disable=(args.silent or local_rank != 0)):
*inputs, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*inputs)
loss = loss_fn(pred, target) / args.accumulate_grad_batches
loss_acc += loss.detach()
grad_scaler.scale(loss).backward()
# gradient accumulation
if (i + 1) % args.accumulate_grad_batches == 0 or (i + 1) == len(train_dataloader):
if args.gradient_clip:
grad_scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
grad_scaler.step(optimizer)
grad_scaler.update()
model.zero_grad(set_to_none=True)
return loss_acc / (i + 1)
def train(model: nn.Module,
loss_fn: _Loss,
train_dataloader: DataLoader,
val_dataloader: DataLoader,
callbacks: List[BaseCallback],
logger: Logger,
args):
device = torch.cuda.current_device()
model.to(device=device)
local_rank = get_local_rank()
world_size = dist.get_world_size() if dist.is_initialized() else 1
if dist.is_initialized():
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
model._set_static_graph()
model.train()
grad_scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
if args.optimizer == 'adam':
optimizer = FusedAdam(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
elif args.optimizer == 'lamb':
optimizer = FusedLAMB(model.parameters(), lr=args.learning_rate, betas=(args.momentum, 0.999),
weight_decay=args.weight_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
epoch_start = load_state(model, optimizer, args.load_ckpt_path, callbacks) if args.load_ckpt_path else 0
for callback in callbacks:
callback.on_fit_start(optimizer, args, epoch_start)
for epoch_idx in range(epoch_start, args.epochs):
if isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch_idx)
loss = train_epoch(model, train_dataloader, loss_fn, epoch_idx, grad_scaler, optimizer, local_rank, callbacks,
args)
if dist.is_initialized():
torch.distributed.all_reduce(loss)
loss /= world_size
loss = loss.item()
logging.info(f'Train loss: {loss}')
logger.log_metrics({'train loss': loss}, epoch_idx)
if epoch_idx + 1 == args.epochs:
logger.log_metrics({'train loss': loss})
for callback in callbacks:
callback.on_epoch_end()
if not args.benchmark and args.save_ckpt_path is not None and args.ckpt_interval > 0 \
and (epoch_idx + 1) % args.ckpt_interval == 0:
save_state(model, optimizer, epoch_idx, args.save_ckpt_path, callbacks)
if not args.benchmark and (
(args.eval_interval > 0 and (epoch_idx + 1) % args.eval_interval == 0) or epoch_idx + 1 == args.epochs):
evaluate(model, val_dataloader, callbacks, args)
model.train()
for callback in callbacks:
callback.on_validation_end(epoch_idx)
if args.save_ckpt_path is not None and not args.benchmark:
save_state(model, optimizer, args.epochs, args.save_ckpt_path, callbacks)
for callback in callbacks:
callback.on_fit_end()
def print_parameters_count(model):
num_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
logging.info(f'Number of trainable parameters: {num_params_trainable}')
if __name__ == '__main__':
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Training procedure |')
logging.info('===============================')
if args.seed is not None:
logging.info(f'Using seed {args.seed}')
seed_everything(args.seed)
loggers = [DLLogger(save_dir=args.log_dir, filename=args.dllogger_name)]
if args.wandb:
loggers.append(WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer'))
logger = LoggerCollection(loggers)
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=using_tensor_cores(args.amp), # use Tensor Cores more effectively
**vars(args)
)
loss_fn = nn.L1Loss()
if args.benchmark:
logging.info('Running benchmark mode')
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(
logger, args.batch_size * world_size, warmup_epochs=1 if args.epochs > 1 else 0
)]
else:
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='validation'),
QM9LRSchedulerCallback(logger, epochs=args.epochs)]
if is_distributed:
gpu_affinity.set_affinity(gpu_id=get_local_rank(), nproc_per_node=torch.cuda.device_count(), scope='socket')
torch.set_float32_matmul_precision('high')
print_parameters_count(model)
logger.log_hyperparams(vars(args))
increase_l2_fetch_granularity()
train(model,
loss_fn,
datamodule.train_dataloader(),
datamodule.val_dataloader(),
callbacks,
logger,
args)
logging.info('Training finished successfully')
|
TensorFlow2/LanguageModeling/BERT/official/utils/logs | logs | metric_hook_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.python.training import monitored_session # pylint: disable=g-bad-import-order
from official.utils.logs import metric_hook
from official.utils.testing import mock_lib
class LoggingMetricHookTest(tf.test.TestCase):
"""Tests for LoggingMetricHook."""
def setUp(self):
super(LoggingMetricHookTest, self).setUp()
self._log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self._logger = mock_lib.MockBenchmarkLogger()
def tearDown(self):
super(LoggingMetricHookTest, self).tearDown()
tf.io.gfile.rmtree(self.get_temp_dir())
def test_illegal_args(self):
with self.assertRaisesRegexp(ValueError, "nvalid every_n_iter"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=0)
with self.assertRaisesRegexp(ValueError, "nvalid every_n_iter"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=-10)
with self.assertRaisesRegexp(ValueError, "xactly one of"):
metric_hook.LoggingMetricHook(
tensors=["t"], every_n_iter=5, every_n_secs=5)
with self.assertRaisesRegexp(ValueError, "xactly one of"):
metric_hook.LoggingMetricHook(tensors=["t"])
with self.assertRaisesRegexp(ValueError, "metric_logger"):
metric_hook.LoggingMetricHook(tensors=["t"], every_n_iter=5)
def test_print_at_end_only(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], at_end=True, metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
for _ in range(3):
mon_sess.run(train_op)
self.assertEqual(self._logger.logged_metric, [])
hook.end(sess)
self.assertEqual(len(self._logger.logged_metric), 1)
metric = self._logger.logged_metric[0]
self.assertRegexpMatches(metric["name"], "foo")
self.assertEqual(metric["value"], 42.0)
self.assertEqual(metric["unit"], None)
self.assertEqual(metric["global_step"], 0)
def test_global_step_not_found(self):
with tf.Graph().as_default():
t = tf.constant(42.0, name="foo")
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], at_end=True, metric_logger=self._logger)
with self.assertRaisesRegexp(
RuntimeError, "should be created to use LoggingMetricHook."):
hook.begin()
def test_log_tensors(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
t1 = tf.constant(42.0, name="foo")
t2 = tf.constant(43.0, name="bar")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t1, t2], at_end=True, metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
for _ in range(3):
mon_sess.run(train_op)
self.assertEqual(self._logger.logged_metric, [])
hook.end(sess)
self.assertEqual(len(self._logger.logged_metric), 2)
metric1 = self._logger.logged_metric[0]
self.assertRegexpMatches(str(metric1["name"]), "foo")
self.assertEqual(metric1["value"], 42.0)
self.assertEqual(metric1["unit"], None)
self.assertEqual(metric1["global_step"], 0)
metric2 = self._logger.logged_metric[1]
self.assertRegexpMatches(str(metric2["name"]), "bar")
self.assertEqual(metric2["value"], 43.0)
self.assertEqual(metric2["unit"], None)
self.assertEqual(metric2["global_step"], 0)
def _validate_print_every_n_steps(self, sess, at_end):
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], every_n_iter=10, at_end=at_end,
metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
for _ in range(3):
self._logger.logged_metric = []
for _ in range(9):
mon_sess.run(train_op)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
# Add additional run to verify proper reset when called multiple times.
self._logger.logged_metric = []
mon_sess.run(train_op)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
self._logger.logged_metric = []
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
else:
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
def test_print_every_n_steps(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_steps(sess, at_end=False)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=False)
def test_print_every_n_steps_and_end(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_steps(sess, at_end=True)
# Verify proper reset.
self._validate_print_every_n_steps(sess, at_end=True)
def _validate_print_every_n_secs(self, sess, at_end):
t = tf.constant(42.0, name="foo")
train_op = tf.constant(3)
hook = metric_hook.LoggingMetricHook(
tensors=[t.name], every_n_secs=1.0, at_end=at_end,
metric_logger=self._logger)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook]) # pylint: disable=protected-access
sess.run(tf.compat.v1.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
# assertNotRegexpMatches is not supported by python 3.1 and later
self._logger.logged_metric = []
mon_sess.run(train_op)
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
time.sleep(1.0)
self._logger.logged_metric = []
mon_sess.run(train_op)
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
self._logger.logged_metric = []
hook.end(sess)
if at_end:
self.assertRegexpMatches(str(self._logger.logged_metric), t.name)
else:
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self._logger.logged_metric).find(t.name), -1)
def test_print_every_n_secs(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_secs(sess, at_end=False)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=False)
def test_print_every_n_secs_and_end(self):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
tf.compat.v1.train.get_or_create_global_step()
self._validate_print_every_n_secs(sess, at_end=True)
# Verify proper reset.
self._validate_print_every_n_secs(sess, at_end=True)
if __name__ == "__main__":
tf.test.main()
|
PyTorch/LanguageModeling/BART/configs | configs | config_hf_xsum | {
"_num_labels": 3,
"activation_dropout": 0.0,
"activation_function": "gelu",
"add_bias_logits": false,
"add_final_layer_norm": false,
"architectures": [
"BartForConditionalGeneration"
],
"attention_dropout": 0.0,
"bos_token_id": 0,
"classif_dropout": 0.0,
"d_model": 1024,
"decoder_attention_heads": 16,
"decoder_ffn_dim": 4096,
"decoder_layerdrop": 0.0,
"decoder_layers": 12,
"decoder_start_token_id": 2,
"dropout": 0.1,
"early_stopping": true,
"encoder_attention_heads": 16,
"encoder_ffn_dim": 4096,
"encoder_layerdrop": 0.0,
"encoder_layers": 12,
"eos_token_id": 2,
"eos_token_ids": [
2
],
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"init_std": 0.02,
"is_encoder_decoder": true,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"max_length": 62,
"max_position_embeddings": 1024,
"min_length": 11,
"model_type": "bart",
"no_repeat_ngram_size": 3,
"normalize_before": false,
"normalize_embedding": true,
"num_beams": 6,
"num_hidden_layers": 12,
"output_past": true,
"pad_token_id": 1,
"prefix": " ",
"replacing_rate": 0,
"scale_embedding": false,
"static_position_embeddings": false,
"student_decoder_layers": null,
"student_encoder_layers": null,
"task_specific_params": {},
"vocab_size": 50264
}
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | extract_mels | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from common.text import cmudict
from common.utils import init_distributed, prepare_tmp
from fastpitch.data_function import batch_to_gpu, TTSCollate, TTSDataset
from inference import CHECKPOINT_SPECIFIC_ARGS
from models import load_and_setup_model
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str, default='./',
help='Path to dataset')
general = parser.add_argument_group('general setup')
general.add_argument('--checkpoint-path', type=str, required=True,
help='Checkpoint path to fastpitch model')
general.add_argument('--resume', action='store_true',
help='Load last checkpoint from training')
general.add_argument('--amp', action='store_true',
help='Enable AMP')
general.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
general.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
general.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
data = parser.add_argument_group('dataset parameters')
data.add_argument('--dataset-files', type=str, nargs='*', required=True,
help='Paths to dataset filelists.')
data.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
data.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
data.add_argument('--p-arpabet', type=float, default=0.0,
help='Probability of using arpabets instead of graphemes '
'for each word; set 0 for pure grapheme training')
data.add_argument('--heteronyms-path', type=str, default='data/cmudict/heteronyms',
help='Path to the list of heteronyms')
data.add_argument('--cmudict-path', type=str, default='data/cmudict/cmudict-0.7b',
help='Path to the pronouncing dictionary')
data.add_argument('--prepend-space-to-text', action='store_true',
help='Capture leading silence with a space token')
data.add_argument('--append-space-to-text', action='store_true',
help='Capture trailing silence with a space token')
cond = parser.add_argument_group('data for conditioning')
cond.add_argument('--load-pitch-from-disk', action='store_true',
help='Use pitch cached on disk with prepare_dataset.py')
cond.add_argument('--pitch-online-method', default='pyin', choices=['pyin'],
help='Calculate pitch on the fly during training')
cond.add_argument('--pitch-online-dir', type=str, default=None,
help='A directory for storing pitch calculated on-line')
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
dist = parser.add_argument_group('distributed setup')
dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc; do not set manually')
dist.add_argument('--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc; do not set manually')
return parser
def main():
parser = argparse.ArgumentParser(
description='FastPitch spectrogram extraction', allow_abbrev=False)
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
torch.backends.cudnn.benchmark = args.cudnn_benchmark
model, model_config, train_setup = load_and_setup_model(
'FastPitch', parser, args.checkpoint_path, args.amp, unk_args=unk_args,
device=torch.device('cuda' if args.cuda else 'cpu'))
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
# use train_setup loaded from the checkpoint (sampling_rate, symbol_set, etc.)
for k in CHECKPOINT_SPECIFIC_ARGS:
if k in train_setup and getattr(args, k) != train_setup[k]:
v = train_setup[k]
print(f'Overwriting args.{k}={getattr(args, k)} with {v} '
f'from {args.checkpoint_path} checkpoint')
setattr(args, k, v)
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
distributed_run = args.world_size > 1
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
model = DDP(model, device_ids=[args.local_rank],
output_device=args.local_rank, find_unused_parameters=True)
if args.local_rank == 0:
Path(args.output).mkdir(exist_ok=True, parents=True)
prepare_tmp(args.pitch_online_dir)
args.n_speakers = model_config['n_speakers']
args.n_mel_channels = model_config['n_mel_channels']
trainset = TTSDataset(audiopaths_and_text=args.dataset_files,
load_mel_from_disk=False, **vars(args))
dataset_loader = DataLoader(
trainset, num_workers=16, shuffle=False, batch_size=args.batch_size,
sampler=(DistributedSampler(trainset) if distributed_run else None),
pin_memory=True, drop_last=False, collate_fn=TTSCollate())
with torch.no_grad():
for batch in tqdm(dataset_loader, 'Extracting mels'):
x, y, num_frames = batch_to_gpu(batch)
_, _, _, mel_lens, *_, audiopaths = x
with torch.cuda.amp.autocast(enabled=args.amp):
mel_out, *_ = model(x, use_gt_pitch=True)
mel_out = mel_out.transpose(1, 2)
assert mel_out.size(1) == args.n_mel_channels, mel_out.shape
for apath, mel, len_ in zip(audiopaths, mel_out, mel_lens):
np.save(Path(args.output, Path(apath).stem + '.npy'),
mel[:, :len_.item()].cpu().numpy())
if __name__ == '__main__':
main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any, List, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER
def format_env_key(s: str):
"""
    Format environment variable key
Args:
s: String to format
Returns:
Upper cased string
"""
return s.upper()
def format_env_value(value: Any) -> str:
"""
Format environment variable value
Args:
value: value to be formatted
Returns:
Formatted value as a string
"""
value = value if not isinstance(value, Enum) else value.value
value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
value = str(value)
return value
def get_result_path(result_path: str) -> str:
"""
    Map the result path when different variants are passed, e.g. with an environment variable in the path
Args:
result_path: Path to result file
Returns:
str
"""
for env_var, val in os.environ.items():
result_path = result_path.replace(f"${{{env_var}}}", val)
if result_path.startswith("/"):
return result_path
if result_path.startswith("./"):
result_path = result_path[2:]
return result_path
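# Usage sketch (illustrative only; the environment variable and paths below are
# assumptions, not values used by the runner):
#
#   os.environ["SHARED_DIR"] = "/mnt/shared"
#   get_result_path("${SHARED_DIR}/results.json")   # -> "/mnt/shared/results.json"
#   get_result_path("./results/metrics.csv")        # -> "results/metrics.csv"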
def clean_directory(directory: pathlib.Path) -> None:
"""
Remove all files and directories from directory
Args:
directory: Path to directory which should be cleaned
Returns:
None
"""
LOGGER.debug(f"Cleaning {directory.as_posix()}")
if not directory.is_dir():
LOGGER.warning(f"{directory.name} is not a directory.")
return
for item in os.listdir(directory):
item_path = directory / item
if item_path.is_dir():
LOGGER.debug(f"Remove dir {item_path.as_posix()}")
shutil.rmtree(item_path.as_posix())
elif item_path.is_file():
LOGGER.debug(f"Remove file: {item_path.as_posix()}")
item_path.unlink()
else:
LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")
def exec_command(command: Command) -> None:
"""
Execute command
Args:
command: Command to run
"""
try:
process = subprocess.Popen(
[str(command)],
shell=True,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.rstrip())
LOGGER.write(output)
result = process.poll()
if result != 0:
raise RunnerException(f"Command {command} failed with exit status: {result}")
except subprocess.CalledProcessError as e:
raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer | maintainer | maintainer | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
        devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
        log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/model-config/tacotron2waveglow | tacotron2waveglow | config | name: "tacotron2waveglow"
platform: "custom"
default_model_filename: "libtt2i_trtis.so"
max_batch_size: 32
input [
{
name: "INPUT"
data_type: TYPE_STRING
dims: [ -1 ]
}
]
output [
{
name: "OUTPUT"
data_type: TYPE_FP32
dims: [ -1 ]
},
{
name: "OUTPUT_LENGTH"
data_type: TYPE_INT32
dims: [ -1 ]
}
]
parameters [
{
key: "engine_path"
value: { string_value: "/engines" }
},
{
key: "mapping_path"
value: { string_value: "/models/tacotron2waveglow/mapping.txt" }
},
{
key: "use_denoiser"
value: { string_value: "no" }
}
]
|
TensorFlow/Translation/GNMT/utils | utils | nmt_utils | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions specifically for NMT."""
from __future__ import print_function
import codecs
import time
import numpy as np
import tensorflow as tf
from utils import misc_utils as utils
__all__ = ["get_translation"]
def get_translation(nmt_outputs, sent_id, tgt_eos, subword_option):
"""Given batch decoding outputs, select a sentence and turn to text."""
if tgt_eos: tgt_eos = tgt_eos.encode("utf-8")
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
# If there is an eos symbol in outputs, cut them at that point.
if tgt_eos and tgt_eos in output:
output = output[:output.index(tgt_eos)]
if subword_option == "bpe": # BPE
translation = utils.format_bpe_text(output)
elif subword_option == "spm": # SPM
translation = utils.format_spm_text(output)
else:
translation = utils.format_text(output)
return translation, len(output)
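# Usage sketch (illustrative only; `nmt_outputs` below is an assumed decoder
# output array of token bytes with shape [batch_size, time], not a value
# produced in this file):
#
#   translation, length = get_translation(
#       nmt_outputs, sent_id=0, tgt_eos="</s>", subword_option="bpe")
#   # `translation` holds the detokenized sentence, `length` the token count.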
|
PyTorch/SpeechSynthesis/FastPitch/common | common | layers | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import (dynamic_range_compression,
dynamic_range_decompression)
from common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear',
batch_norm=False):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
        self.norm = torch.nn.BatchNorm1d(out_channels) if batch_norm else None
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
if self.norm is None:
return self.conv(signal)
else:
return self.norm(self.conv(signal))
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size,
padding=(kernel_size // 2))
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = F.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2).to(signal.dtype)
return self.dropout(out)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
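# Minimal usage sketch (illustrative; `audio` is an assumed waveform tensor of
# shape (B, T) already normalized to [-1, 1], not an object defined here):
#
#   stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
#                       n_mel_channels=80, sampling_rate=22050)
#   mel = stft.mel_spectrogram(audio)   # -> (B, n_mel_channels, n_frames)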
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime | runtime | inference | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
from typing import List
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
from se3_transformer.runtime import gpu_affinity
from se3_transformer.runtime.arguments import PARSER
from se3_transformer.runtime.callbacks import BaseCallback
from se3_transformer.runtime.loggers import DLLogger, WandbLogger, LoggerCollection
from se3_transformer.runtime.utils import to_cuda, get_local_rank
@torch.inference_mode()
def evaluate(model: nn.Module,
dataloader: DataLoader,
callbacks: List[BaseCallback],
args):
model.eval()
for i, batch in tqdm(enumerate(dataloader), total=len(dataloader), unit='batch', desc=f'Evaluation',
leave=False, disable=(args.silent or get_local_rank() != 0)):
*input, target = to_cuda(batch)
for callback in callbacks:
callback.on_batch_start()
with torch.cuda.amp.autocast(enabled=args.amp):
pred = model(*input)
for callback in callbacks:
callback.on_validation_step(input, target, pred)
if __name__ == '__main__':
from se3_transformer.runtime.callbacks import QM9MetricCallback, PerformanceCallback
from se3_transformer.runtime.utils import init_distributed, seed_everything
from se3_transformer.model import SE3TransformerPooled, Fiber
from se3_transformer.data_loading import QM9DataModule
import torch.distributed as dist
import logging
import sys
is_distributed = init_distributed()
local_rank = get_local_rank()
args = PARSER.parse_args()
logging.getLogger().setLevel(logging.CRITICAL if local_rank != 0 or args.silent else logging.INFO)
logging.info('====== SE(3)-Transformer ======')
logging.info('| Inference on the test set |')
logging.info('===============================')
if not args.benchmark and args.load_ckpt_path is None:
logging.error('No load_ckpt_path provided, you need to provide a saved model to evaluate')
sys.exit(1)
if args.benchmark:
logging.info('Running benchmark mode with one warmup pass')
if args.seed is not None:
seed_everything(args.seed)
major_cc, minor_cc = torch.cuda.get_device_capability()
loggers = [DLLogger(save_dir=args.log_dir, filename=args.dllogger_name)]
if args.wandb:
loggers.append(WandbLogger(name=f'QM9({args.task})', save_dir=args.log_dir, project='se3-transformer'))
logger = LoggerCollection(loggers)
datamodule = QM9DataModule(**vars(args))
model = SE3TransformerPooled(
fiber_in=Fiber({0: datamodule.NODE_FEATURE_DIM}),
fiber_out=Fiber({0: args.num_degrees * args.num_channels}),
fiber_edge=Fiber({0: datamodule.EDGE_FEATURE_DIM}),
output_dim=1,
tensor_cores=(args.amp and major_cc >= 7) or major_cc >= 8, # use Tensor Cores more effectively
**vars(args)
)
callbacks = [QM9MetricCallback(logger, targets_std=datamodule.targets_std, prefix='test')]
model.to(device=torch.cuda.current_device())
if args.load_ckpt_path is not None:
checkpoint = torch.load(str(args.load_ckpt_path), map_location={'cuda:0': f'cuda:{local_rank}'})
model.load_state_dict(checkpoint['state_dict'])
if is_distributed:
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(local_rank, nproc_per_node, scope='socket')
model = DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
model._set_static_graph()
torch.set_float32_matmul_precision('high')
test_dataloader = datamodule.test_dataloader() if not args.benchmark else datamodule.train_dataloader()
if not args.benchmark:
evaluate(model,
test_dataloader,
callbacks,
args)
for callback in callbacks:
callback.on_validation_end()
else:
world_size = dist.get_world_size() if dist.is_initialized() else 1
callbacks = [PerformanceCallback(
logger, args.batch_size * world_size,
warmup_epochs=1 if args.epochs > 1 else 0,
mode='inference'
)]
for _ in range(args.epochs):
evaluate(model,
test_dataloader,
callbacks,
args)
callbacks[0].on_epoch_end()
callbacks[0].on_fit_end()
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync | # SSD with Mobilenet v1 FPN feature extractor, shared box predictor and focal
# loss (a.k.a. RetinaNet).
# See Lin et al, https://arxiv.org/abs/1708.02002
# Trained on COCO, initialized from Imagenet classification checkpoint
# Achieves 29.7 mAP on COCO14 minival dataset.
# This config is TPU compatible
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: false
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: [1.0, 2.0, 0.5]
scales_per_octave: 2
}
}
image_resizer {
fixed_shape_resizer {
height: 640
width: 640
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 256
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
num_layers_before_predictor: 4
kernel_size: 3
}
}
feature_extractor {
type: 'ssd_mobilenet_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
batch_size: 64
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_object_covered: 0.0
min_aspect_ratio: 0.75
max_aspect_ratio: 3.0
min_area: 0.75
max_area: 1.0
overlap_thresh: 0.0
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .04
total_steps: 25000
warmup_learning_rate: .013333
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
} |
PyTorch/LanguageModeling/BERT/data | data | bertPrep | # Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting
import argparse
import itertools
import multiprocessing
import os
import pprint
import subprocess
def main(args):
working_dir = os.environ['BERT_PREP_WORKING_DIR']
print('Working Directory:', working_dir)
print('Action:', args.action)
print('Dataset Name:', args.dataset)
if args.input_files:
args.input_files = args.input_files.split(',')
hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
+ "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
+ "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor)
directory_structure = {
'download' : working_dir + '/download', # Downloaded and decompressed
        'extracted' : working_dir + '/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor)
'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same
'sharded' : working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set),
'tfrecord' : working_dir + '/tfrecord'+ hdf5_tfrecord_folder_prefix,
'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
}
print('\nDirectory Structure:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(directory_structure)
print('')
if args.action == 'download':
if not os.path.exists(directory_structure['download']):
os.makedirs(directory_structure['download'])
downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
downloader.download()
elif args.action == 'text_formatting':
assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' and args.dataset != 'squad' and args.dataset != 'mrpc', 'Cannot perform text_formatting on pretrained weights'
if not os.path.exists(directory_structure['extracted']):
os.makedirs(directory_structure['extracted'])
if not os.path.exists(directory_structure['formatted']):
os.makedirs(directory_structure['formatted'])
if args.dataset == 'bookscorpus':
books_path = directory_structure['download'] + '/bookscorpus'
#books_path = directory_structure['download']
output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
books_formatter.merge()
elif args.dataset == 'wikicorpus_en':
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'wikicorpus_zh':
            assert False, 'wikicorpus_zh is not fully supported at this time. The simplified/traditional Chinese data still needs to be translated and properly segmented, and should work once this step is added.'
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
#wikiextractor_process.communicate()
wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
assert os.stat(output_filename).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'
elif args.action == 'sharding':
# Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
if args.input_files is None:
if args.dataset == 'bookscorpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
elif args.dataset == 'wikicorpus_en':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
elif args.dataset == 'wikicorpus_zh':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
elif args.dataset == 'books_wiki_en_corpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset
if not os.path.exists(directory_structure['sharded']):
os.makedirs(directory_structure['sharded'])
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset)
# Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
# it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
# Different languages (e.g., Chinese simplified/traditional) may require translation and
# other packages to be called from here -- just add a conditional branch for those extra steps
segmenter = TextSharding.NLTKSegmenter()
sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
else:
assert False, 'Unsupported dataset for sharding'
elif args.action == 'create_tfrecord_files':
        assert False, 'TFRecord creation is not supported in this PyTorch model example release.'
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)
def create_record_worker(filename_prefix, shard_id, output_format='tfrecord'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
            last_process = create_record_worker(output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i)
last_process.wait()
elif args.action == 'create_hdf5_files':
last_process = None
if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset):
os.makedirs(directory_structure['hdf5'] + "/" + args.dataset)
def create_record_worker(filename_prefix, shard_id, output_format='hdf5'):
bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i)
last_process.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Preprocessing Application for Everything BERT-related'
)
parser.add_argument(
'--action',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
choices={
            'download', # Download and verify md5/sha sums
'text_formatting', # Convert into a file that contains one article/book per line
'sharding', # Convert previous formatted text into shards containing one sentence per line
'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info
'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info
}
)
parser.add_argument(
'--dataset',
type=str,
help='Specify the dataset to perform --action on',
choices={
'bookscorpus',
'wikicorpus_en',
'wikicorpus_zh',
'books_wiki_en_corpus',
'google_pretrained_weights',
'nvidia_pretrained_weights',
'mrpc',
'sst-2',
'squad',
'all'
}
)
parser.add_argument(
'--input_files',
type=str,
help='Specify the input files in a comma-separated list (no spaces)'
)
parser.add_argument(
'--n_training_shards',
type=int,
help='Specify the number of training shards to generate',
default=256
)
parser.add_argument(
'--n_test_shards',
type=int,
help='Specify the number of test shards to generate',
default=256
)
parser.add_argument(
'--fraction_test_set',
type=float,
help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
default=0.1
)
parser.add_argument(
'--segmentation_method',
type=str,
help='Specify your choice of sentence segmentation',
choices={
'nltk'
},
default='nltk'
)
parser.add_argument(
'--n_processes',
type=int,
help='Specify the max number of processes to allow at one time',
default=4
)
parser.add_argument(
'--random_seed',
type=int,
help='Specify the base seed to use for any random number generation',
default=12345
)
parser.add_argument(
'--dupe_factor',
type=int,
help='Specify the duplication factor',
default=5
)
parser.add_argument(
'--masked_lm_prob',
type=float,
help='Specify the probability for masked lm',
default=0.15
)
parser.add_argument(
'--max_seq_length',
type=int,
help='Specify the maximum sequence length',
default=512
)
parser.add_argument(
'--max_predictions_per_seq',
type=int,
help='Specify the maximum number of masked words per sequence',
default=20
)
parser.add_argument(
'--do_lower_case',
type=int,
help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
default=1
)
parser.add_argument(
'--vocab_file',
type=str,
        help='Specify absolute path to the vocab file to use'
)
parser.add_argument(
'--skip_wikiextractor',
type=int,
help='Specify whether to skip wikiextractor step 0=False, 1=True',
default=0
)
parser.add_argument(
'--interactive_json_config_generator',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
)
args = parser.parse_args()
main(args)
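# Example invocation (illustrative; the working directory and dataset choice
# are assumptions, not defaults of this script):
#
#   BERT_PREP_WORKING_DIR=/workspace/bert/data python bertPrep.py \
#       --action sharding --dataset wikicorpus_en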
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | characterMapping | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_CHARACTERMAPPING_H
#define TT2I_CHARACTERMAPPING_H
#include "timedObject.h"
#include <string>
#include <unordered_map>
#include <vector>
namespace tts
{
class CharacterMapping : public TimedObject
{
public:
/**
* @brief Create a default character mapping. This maps standard 'ascii'
* characters as follows:
* ```
* _-!\'(),.:;? ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
* ```
*
* @return The character mapping.
*/
static CharacterMapping defaultMapping();
/**
* @brief Create an empty character mapping.
*/
CharacterMapping();
/**
* @brief Create a character mapping based on the given set of characters.
*
* @param mapping The set of characters to map in order.
*/
CharacterMapping(const std::vector<char>& mapping);
/**
* @brief Create a character mapping based on the given set of string,
* treating each string as a symbol.
*
* @param mapping The set of symbols to map in order.
*/
CharacterMapping(const std::vector<std::string>& mapping);
/**
* @brief Set the given character to the given sequence number.
*
* @param c The character.
* @param n The sequence number.
*/
void set(char c, int32_t n);
/**
* @brief Set the given symbol to the given sequence number.
*
* @param c The symbol.
* @param n The sequence number.
*/
void set(const std::string& c, int32_t n);
/**
* @brief Map the given character to the given sequence number.
*
* @param c The character to map.
*
* @return The sequence number.
*/
int32_t get(char c) const;
/**
* @brief Map the given symbol to the given sequence number.
*
* @param c The symbol to map.
*
* @return The sequence number.
*/
int32_t get(const std::string& c) const;
/**
* @brief Convert a string of symbols to a sequence.
*
* @param input The string of symbols.
*
* @return The sequence.
*/
std::vector<int32_t> map(const std::string& input);
/**
* @brief Map a set of bytes to sequence numbers. Several bytes may map to a
     * single sequence number, thus the output length will be equal to or less than
* the input length.
*
* @param input The input.
* @param inputSize The length of the input in characters.
* @param output Must be of size at least `inputSize`.
* @param outputSize The final size of the output (will be equal to or less
* than `inputSize`).
*/
void map(const char* input, size_t inputSize, int32_t* output, size_t* outputSize);
private:
std::unordered_map<std::string, int32_t> mMapping;
};
} // namespace tts
#endif
|
TensorFlow/Classification/ConvNets | ConvNets | README | # Resnet-family Convolutional Neural Networks for Image Classification in Tensorflow
In this repository you will find implementations of ResNet and its variants for image classification.
Convolutional Network models for TensorFlow1 are no longer maintained and will soon become unavailable. Please consider PyTorch or TensorFlow2 models as a substitute for your requirements.
## Table Of Contents
* [Models](#models)
* [Validation accuracy results](#validation-accuracy-results)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40G)](#training-performance-nvidia-dgx-a100-8x-a100-40g)
* [Training performance: NVIDIA DGX-1 (8x V100 16G)](#training-performance-nvidia-dgx-1-8x-v100-16g)
* [Release notes](#release-notes)
* [Changelog](#changelog)
## Models
The following table provides links to where you can find additional information on each model:
| **Model** | **Link**|
|-----------|---------|
| resnet50 | [README](./resnet50v1.5/README.md) |
| resnext101-32x4d | [README](./resnext101-32x4d/README.md) |
| se-resnext101-32x4d | [README](./se-resnext101-32x4d/README.md) |
## Validation accuracy results
Our results were obtained by running the applicable training scripts in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX-1 with (8x V100 16G) GPUs. The specific training script that was run is documented in the corresponding model's README.
The following table shows the validation accuracy results of the
three classification models side-by-side.
| **arch** | **AMP Top1** | **AMP Top5** | **FP32 Top1** | **FP32 Top5** |
|:-:|:-:|:-:|:-:|:-:|
| resnet50 | 78.35 | 94.21 | 78.34 | 94.21 |
| resnext101-32x4d | 80.21 | 95.00 | 80.21 | 94.99 |
| se-resnext101-32x4d | 80.87 | 95.35 | 80.84 | 95.37 |
## Training performance results
### Training performance: NVIDIA DGX A100 (8x A100 40G)
Our results were obtained by running the applicable
training scripts in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX A100 with (8x A100 40G) GPUs.
Performance numbers (in images per second)
were averaged over an entire training epoch.
The specific training script that was run is documented
in the corresponding model's README.
The following table shows the training performance results of the
three classification models side-by-side.
| **arch** | **Mixed Precision XLA** | **TF32 XLA** | **Mixed Precision speedup** |
|:-:|:-:|:-:|:-:|
| resnet50 | 16400 img/s | 6300 img/s | 2.60x |
| resnext101-32x4d | 8000 img/s | 2630 img/s | 3.05x |
| se-resnext101-32x4d | 6930 img/s | 2400 img/s | 2.88x |
### Training performance: NVIDIA DGX-1 (8x V100 16G)
Our results were obtained by running the applicable
training scripts in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX-1 with (8x V100 16G) GPUs.
Performance numbers (in images per second)
were averaged over an entire training epoch.
The specific training script that was run is documented
in the corresponding model's README.
The following table shows the training performance results of the
three classification models side-by-side.
| **arch** | **Mixed Precision XLA** | **FP32 XLA** | **Mixed Precision speedup** |
|:-:|:-:|:-:|:-:|
| resnet50 | 9510 img/s | 3170 img/s | 3.00x |
| resnext101-32x4d | 4160 img/s | 1210 img/s | 3.44x |
| se-resnext101-32x4d | 3360 img/s | 1120 img/s | 3.00x |
## Release notes
### Changelog
April 2021
- Ceased maintenance of ConvNets in TensorFlow1
June 2020
- ConvNets repo restructuring
- Initial release of ResNeXt and SE-ResNeXt
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/transcoding | transcoding | small_csv | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
feature_spec:
cat_0.bin:
cardinality: 10
cat_1.bin:
cardinality: 23412
cat_2.bin:
cardinality: 45000
cat_3.bin:
cardinality: 100
cat_4.bin:
cardinality: 50
cat_5.bin:
cardinality: 127
label: {}
num_0: {}
num_1: {}
num_2: {}
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: csv
- features:
- label
files:
- test/label.bin
type: csv
- features:
- cat_0.bin
- cat_1.bin
files:
- test/catpart1.bin
type: csv
- features:
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
files:
- test/catpart2.bin
type: csv
train:
- features: *id001
files:
- train/numerical.bin
type: csv
- features:
- label
files:
- train/label.bin
type: csv
- features:
- cat_0.bin
- cat_1.bin
- cat_2.bin
files:
- train/catpart0.bin
type: csv
- features:
- cat_3.bin
- cat_4.bin
- cat_5.bin
files:
- train/catpart1.bin
type: csv
|
PyTorch/Recommendation/DLRM/preproc | preproc | DGX-A100_config | #!/bin/bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the environment variables to run spark job
# should modify below environment variables
# below numbers should be adjusted according to the resource of your running environment
# set the total number of CPU cores, spark can use
export TOTAL_CORES=256
# set the number of executors
export NUM_EXECUTORS=8
# the number of cores for each executor; it will be calculated automatically
export NUM_EXECUTOR_CORES=$((${TOTAL_CORES}/${NUM_EXECUTORS}))
# unit: GB, set the max memory you want to use
export TOTAL_MEMORY=2000
# unit: GB, set the memory for driver
export DRIVER_MEMORY=32
# the memory per executor
export EXECUTOR_MEMORY=$(((${TOTAL_MEMORY}-${DRIVER_MEMORY})/${NUM_EXECUTORS}-16))
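# Worked example with the defaults above (an illustration, not part of the
# original configuration): EXECUTOR_MEMORY = (2000 - 32) / 8 - 16 = 230 GB,
# NUM_EXECUTOR_CORES = 256 / 8 = 32 cores per executor.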
|
PyTorch/LanguageModeling/BERT | BERT | tokenization_utils | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for python and fast tokenizers. Fast tokenizers are provided by HuggingFace's tokenizers library."""
import copy
import functools
import itertools
import json
import logging
import operator
import os
import re
import warnings
import unicodedata
import collections
from collections import UserDict, defaultdict
from contextlib import contextmanager
from enum import Enum
from typing import Any, Dict, List, MutableMapping, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tokenizers import AddedToken as AddedTokenFast
from tokenizers import Encoding as EncodingFast
from tokenizers import BertWordPieceTokenizer
from tokenizers.decoders import Decoder as DecoderFast
from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast
from file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available, torch_required
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
NO_PAD_TOKEN_FOR_BATCH_MSG = (
"No padding token is set for this model, therefore no batch can be made with uneven "
"sequences. Set a padding token or adjust the lengths of the sequences building the "
"batch so that every sequence is of the same length."
)
UNEVEN_SEQUENCES_FOR_BATCH_MSG = (
"The sequences building the batch are not of the same size, no tensor "
"can be built. Set `pad_to_max_length=True` to pad the smaller sequences"
"up to the larger sequence's length."
)
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
class TensorType(Enum):
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
class CharSpan(NamedTuple):
""" Character span in the original string
Args:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
start: int
end: int
class TokenSpan(NamedTuple):
""" Token span in an encoded string (list of tokens)
Args:
start: index of the first token in the span
end: index of the token following the last token in the span
"""
start: int
end: int
def flatten(x: Sequence):
"""
Flatten the provided (potentially nested) sequence
Args:
x (Sequence): Potentially nested sequence to flatten
Returns:
list: Flattened sequence
"""
return functools.reduce(operator.iconcat, x, [])
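# Quick illustration (not part of the original docstring):
#
#   flatten([[1, 2], [3], (4, 5)])   # -> [1, 2, 3, 4, 5]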
@contextmanager
def truncate_and_pad(
tokenizer: BaseTokenizerFast,
max_length: int,
stride: int,
strategy: str,
pad_to_max_length: bool,
padding_side: str,
pad_token_id: int,
pad_token_type_id: int,
pad_token: str,
):
""" This contextmanager is in charge of defining the truncation and the padding strategies for fast tokenizers
(provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards.
This contextmanager assumes the provider tokenizer has no padding / truncation strategy
before the managed section. If your tokenizer set a padding / truncation strategy before,
then it will be reset to no padding/truncation when exiting the managed section.
Args:
tokenizer (BaseTokenizerFast): The tokenizer which will be used
max_length (int): The maximum size of the sequence
stride (int): The stride to use when handling overflow
strategy (str): Overflowing logic to use
pad_to_max_length (bool): Boolean indicating if the output needs to be padded up to max_length
padding_side (str): "left" or "right" indicating the direction the output sequence will be padded
pad_token_id (int): The integer representation of the padding token to use
pad_token_type_id (int): The integer representation of the padding token type to use
pad_token (str): The string representation of the padding token to use
"""
# Handle all the truncation and padding stuff
if max_length is not None:
tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.enable_padding(
max_length=max_length,
direction=padding_side,
pad_id=pad_token_id,
pad_type_id=pad_token_type_id,
pad_token=pad_token,
)
elif pad_to_max_length:
logger.warning(
"Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n"
"To remove this error, you can add a new pad token and then resize model embedding:\n"
"\ttokenizer.pad_token = '<PAD>'\n\tmodel.resize_token_embeddings(len(tokenizer))".format(
pad_token, pad_token_id
)
)
yield
# TODO(morgan, anthony): once we have a simple way to serialize tokenizers maybe store and restore the state afterward
# to avoid destructing the padding / truncation strategy as we do now.
if max_length is not None:
tokenizer.no_truncation()
if pad_to_max_length and (pad_token and pad_token_id >= 0):
tokenizer.no_padding()
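# Rough usage sketch (illustrative; the argument values are assumptions, not
# defaults taken from this module):
#
#   with truncate_and_pad(tokenizer, max_length=128, stride=0,
#                         strategy="longest_first", pad_to_max_length=True,
#                         padding_side="right", pad_token_id=0,
#                         pad_token_type_id=0, pad_token="[PAD]"):
#       encoding = tokenizer.encode("Hello world!")
#   # Outside the managed section the tokenizer is back to its
#   # no-truncation / no-padding behaviour.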
def convert_to_tensors(
batch_outputs: MutableMapping, return_tensors: Union[str, TensorType], prepend_batch_axis: bool = False
) -> MutableMapping:
# Convert to TensorType
if not isinstance(return_tensors, TensorType):
return_tensors = TensorType(return_tensors)
# Get a function reference for the correct framework
if return_tensors == TensorType.TENSORFLOW and is_tf_available():
as_tensor = tf.constant
elif return_tensors == TensorType.PYTORCH and is_torch_available():
as_tensor = torch.tensor
elif return_tensors == TensorType.NUMPY:
as_tensor = np.asarray
else:
raise ImportError(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
return_tensors
)
)
# Do the tensor conversion in batch
for key, value in batch_outputs.items():
try:
if prepend_batch_axis:
value = [value]
tensor = as_tensor(value)
# at-least2d
if tensor.ndim > 2:
tensor = tensor.squeeze(0)
elif tensor.ndim < 2:
tensor = tensor[None, :]
batch_outputs[key] = tensor
except ValueError:
if None in [item for sequence in value for item in sequence]:
raise ValueError(NO_PAD_TOKEN_FOR_BATCH_MSG)
else:
raise ValueError(UNEVEN_SEQUENCES_FOR_BATCH_MSG)
return batch_outputs
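# Illustrative example (assumed inputs, not taken from this repository):
#
#   batch = {"input_ids": [[101, 7592, 102]], "attention_mask": [[1, 1, 1]]}
#   batch = convert_to_tensors(batch, return_tensors="np")
#   batch["input_ids"].shape   # -> (1, 3)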
class BatchEncoding(UserDict):
""" BatchEncoding hold the output of the encode and batch_encode methods (tokens, attention_masks, etc).
This class is derived from a python Dictionary and can be used as a dictionnary.
In addition, this class expose utility methods to map from word/char space to token space.
Args:
data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...)
encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`):
            If the tokenizer is a fast tokenizer which outputs additional information like the mapping from word/char space to token space,
            the `EncodingFast` instance or list of instances (for batches) holds this information.
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
def __getitem__(self, item: Union[int, str]) -> EncodingFast:
""" If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...)
If the key is an integer, get the EncodingFast for batch item with index `key`
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
        Return the list of all encodings from the tokenization process
        Returns: List[EncodingFast] or None if the input was tokenized through a Python (i.e. not fast) tokenizer
"""
return self._encodings
    def tokens(self, batch_index: int = 0) -> List[str]:
if not self._encodings:
raise ValueError("tokens() is not available when using Python based tokenizers")
return self._encodings[batch_index].tokens
def words(self, batch_index: int = 0) -> List[Optional[int]]:
if not self._encodings:
raise ValueError("words() is not available when using Python based tokenizers")
return self._encodings[batch_index].words
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
""" Get the index of the word corresponding (i.e. comprising) to an encoded token
in a sequence of the batch.
Can be called as:
- self.token_to_word(token_index) if batch size is 1
- self.token_to_word(batch_index, token_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_token_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
word_index (:obj:`int`):
index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError("token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
""" Get the encoded token span corresponding to a word in the sequence of the batch.
Token spans are returned as a TokenSpan NamedTuple with:
start: index of the first token
end: index of the token following the last token
Can be called as:
- self.word_to_tokens(word_index) if batch size is 1
- self.word_to_tokens(batch_index, word_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it
easy to associate encoded tokens with the provided tokenized words.
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
token_span (:obj:`TokenSpan`):
Span of tokens in the encoded sequence.
TokenSpan are NamedTuple with:
start: index of the first token
end: index of the token following the last token
"""
if not self._encodings:
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index)))
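# A minimal usage sketch (hypothetical, assuming a fast Rust-backed tokenizer such as
# BertTokenizerFast is available, since these mappings require EncodingFast objects):
#   encoding = fast_tokenizer.encode_plus(["Hello", "world"], is_pretokenized=True)
#   encoding.token_to_word(1)   # index of the word that produced token 1
#   encoding.word_to_tokens(0)  # TokenSpan(start, end) covering word 0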
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
""" Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string associated to the token
end: index of the character following the last character in the original string associated to the token
Can be called as:
- self.token_to_chars(token_index) if batch size is 1
- self.token_to_chars(batch_index, token_index) if batch size is greater or equal to 1
Args:
batch_or_token_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
char_span (:obj:`CharSpan`):
Span of characters in the original string.
CharSpan are NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
"""
if not self._encodings:
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the index of the token in the encoded output comprising a character
in the original string for a sequence of the batch.
Can be called as:
- self.char_to_token(char_index) if batch size is 1
- self.char_to_token(batch_index, char_index) if batch size is greater or equal to 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it
easy to associate encoded tokens with the provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the character in the original string
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_char_index`, this can be the index
of the character in the original string.
Returns:
token_index (:obj:`int`):
Index of the token.
"""
if not self._encodings:
raise ValueError("char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index)
def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
""" Get the character span in the original string corresponding to given word in a sequence
of the batch.
Character spans are returned as a CharSpan NamedTuple with:
start: index of the first character in the original string
end: index of the character following the last character in the original string
Can be called as:
- self.word_to_chars(word_index) if batch size is 1
- self.word_to_chars(batch_index, word_index) if batch size is greater or equal to 1
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
char_span (:obj:`CharSpan` or :obj:`List[CharSpan]`):
Span(s) of the associated character or characters in the string.
CharSpan are NamedTuple with:
start: index of the first character associated to the token in the original string
end: index of the character following the last character associated to the token in the original string
"""
if not self._encodings:
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
""" Get the word in the original string corresponding to a character in the original string of
a sequence of the batch.
Can be called as:
- self.char_to_word(char_index) if batch size is 1
- self.char_to_word(batch_index, char_index) if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it makes it
easy to associate encoded tokens with the provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the character in the original string.
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_char_index`, this can be the index
of the character in the original string.
Returns:
word_index (:obj:`int`):
Index of the word in the original string comprising the character.
"""
if not self._encodings:
raise ValueError("char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index)
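# A minimal usage sketch for the character-level mappings (hypothetical, assuming a fast
# tokenizer and an `encoding` produced from the raw string "Hello world"):
#   encoding.token_to_chars(1)  # CharSpan(start, end) of token 1 in the original string
#   encoding.char_to_token(6)   # index of the token covering character 6
#   encoding.char_to_word(6)    # index of the word covering character 6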
@torch_required
def to(self, device: str):
"""Send all values to device by calling v.to(device)"""
self.data = {k: v.to(device) for k, v in self.data.items()}
return self
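# A minimal usage sketch (hypothetical, assuming PyTorch tensors were requested):
#   encoding = tokenizer.encode_plus("Hello world", return_tensors="pt")
#   encoding = encoding.to("cuda")  # moves every tensor in `data` to the GPU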
class SpecialTokensMixin:
""" SpecialTokensMixin is derived by ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and
handles specific behaviors related to special tokens. In particular, this class holds the
attributes which can be used to directly access these special tokens in a
model-independent manner and allows setting and updating the special tokens.
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
setattr(self, key, value)
elif isinstance(value, AddedTokenFast):
setattr(self, key, str(value))
elif isinstance(value, str):
setattr(self, key, value)
else:
raise TypeError(
"special token {} has to be either str or AddedTokenFast but got: {}".format(key, type(value))
)
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
def _maybe_update_backend(self, value):
""" To be overriden by derived class if a backend tokenizer has to be updated. """
pass
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
self._maybe_update_backend([value])
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
self._maybe_update_backend([value])
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
self._maybe_update_backend([value])
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
self._maybe_update_backend([value])
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
self._maybe_update_backend([value])
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
self._maybe_update_backend([value])
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
self._maybe_update_backend([value])
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
self._maybe_update_backend(value)
@property
def bos_token_id(self):
""" Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self):
""" Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self):
""" Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self):
""" Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self):
""" Id of the padding token in the vocabulary. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self):
""" Id of the padding token type in the vocabulary."""
return self._pad_token_type_id
@property
def cls_token_id(self):
""" Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self):
""" Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self):
""" Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
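# A minimal usage sketch (hypothetical, assuming a BERT-style tokenizer whose special
# tokens are already registered):
#   tokenizer.special_tokens_map   # e.g. {'unk_token': '[UNK]', 'cls_token': '[CLS]', ...}
#   tokenizer.all_special_tokens   # e.g. ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]']
#   tokenizer.all_special_ids      # the corresponding vocabulary ids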
class PreTrainedTokenizer(SpecialTokensMixin):
""" Base class for all tokenizers.
Handles all the shared methods for tokenization and special tokens, as well as methods for
downloading/caching/loading pretrained tokenizers and adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
pretrained models, and as associated values, a dictionary of specific arguments to pass to the
``__init__`` method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
model in ``max_model_input_sizes`` (see above). If no value is provided and no associated max_length can be
found in ``max_model_input_sizes``, it will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
Adding all special tokens here ensures they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, int] = {}
model_input_names: List[str] = ["token_type_ids", "attention_mask"]
padding_side: str = "right"
@property
def vocab_size(self) -> int:
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
@property
def is_fast(self) -> bool:
return False
@property
def max_len(self) -> int:
""" Kept here for backward compatibility.
Now renamed to `model_max_length` to avoid ambiguity.
"""
return self.model_max_length
@property
def max_len_single_sentence(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_single_sentence' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
""" For backward compatibility, allow to try to setup 'max_len_sentences_pair' """
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
def get_vocab(self):
""" Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """
raise NotImplementedError()
def __init__(self, model_max_length=None, **kwargs):
super().__init__(**kwargs)
# For backward compatibility we fallback to set model_max_length from max_len if provided
if "max_len" in kwargs:
warnings.warn(
"Parameter max_len is deprecated and will be removed in a future release. "
"Use model_max_length instead.",
category=FutureWarning,
)
model_max_length = kwargs.pop("max_len")
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right",
"left",
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
# Added tokens
self.added_tokens_encoder = {}
self.unique_added_tokens_encoder = set()
self.added_tokens_decoder = {}
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = {}
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the vocabulary files and override the cached versions if they exist.
resume_download: (`optional`) boolean, default False:
Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (
cls.pretrained_init_configuration
and pretrained_model_name_or_path in cls.pretrained_init_configuration
):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
)
)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not supported."
"Use a model identifier or the path to a directory instead."
)
logger.warning(
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated"
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
}
# Look for the tokenizer main vocabulary files + the additional tokens files
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path, filename=file_name, use_cdn=False
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir
)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = (
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
raise EnvironmentError(msg)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if key not in init_kwargs:
init_kwargs[key] = value
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# update unique_added_tokens_encoder with special tokens for correct tokenization
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.all_special_tokens))
# Add supplementary tokens.
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
tokenizer.unique_added_tokens_encoder.update(set(tokenizer.added_tokens_encoder.keys()))
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files together with:
- added tokens,
- special-tokens-to-class-attributes-mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
Warning: This won't save modifications you may have applied to the tokenizer after the instantiation
(e.g. modifying tokenizer.do_lower_case after creation).
This method makes sure the full tokenizer can then be re-loaded using the
:func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
if len(self.added_tokens_encoder) > 0:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
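# A minimal round-trip sketch (hypothetical, using a derived class such as BertTokenizer):
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tokenizer.save_pretrained('./my_tokenizer/')
#   reloaded = BertTokenizer.from_pretrained('./my_tokenizer/')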
def save_vocabulary(self, save_directory) -> Tuple[str]:
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full
Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained`
class method.
"""
raise NotImplementedError
def add_tokens(self, new_tokens: Union[str, List[str]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: string or list of strings. Each string is a token to add. Tokens are only added if they are not
already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
if not isinstance(new_tokens, list):
new_tokens = [new_tokens]
tokens_to_add = []
for token in new_tokens:
assert isinstance(token, str)
if self.init_kwargs.get("do_lower_case", False) and token not in self.all_special_tokens:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens))
self.added_tokens_decoder.update(added_tok_decoder)
return len(tokens_to_add)
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
inside your training loop.
Args:
pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
number of added tokens in the case of a single sequence if set to False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
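# A minimal usage sketch (hypothetical): for a BERT-style tokenizer this typically returns
# 2 for a single sequence ([CLS] ... [SEP]) and 3 for a pair ([CLS] ... [SEP] ... [SEP]),
# which is how max_len_single_sentence / max_len_sentences_pair are derived above:
#   budget = tokenizer.model_max_length - tokenizer.num_special_tokens_to_add(pair=True)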
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- special tokens are carefully handled by the tokenizer (they are never split)
- you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>')
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str)
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text: TextInput, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
Args:
text (:obj:`string`): The sequence to be encoded.
**kwargs (:obj: `dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method.
"""
all_special_tokens = self.all_special_tokens
text = self.prepare_for_tokenization(text, **kwargs)
# TODO: should this be in the base class?
def lowercase_text(t):
# convert non-special tokens to lowercase
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)
if self.init_kwargs.get("do_lower_case", False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for i, sub_text in enumerate(split_text):
sub_text = sub_text.rstrip()
if i == 0 and not sub_text:
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if not text.strip():
return []
if not tok_list:
return self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_added_tokens_encoder:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(
itertools.chain.from_iterable(
(
self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token]
for token in tokenized_text
)
)
)
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
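# A minimal usage sketch (hypothetical, assuming 'new_tok' was registered via add_tokens):
#   tokenizer.add_tokens(['new_tok'])
#   tokenizer.tokenize("hello new_tok world")
#   # 'new_tok' is kept as a single token; the surrounding text is sub-tokenized as usual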
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a token string (or a sequence of tokens) in a single integer id
(or a sequence of ids), using the vocabulary.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
):
"""
Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary. Adds the model-specific
special tokens (such as beginning of sequence, end of sequence, sequence separator).
If specifying ``add_special_tokens=False``, same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary.
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
removing one token at a time from the longest sequence (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
max_length=max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
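# A minimal usage sketch (hypothetical, assuming a BERT-style tokenizer):
#   ids = tokenizer.encode("Hello world", add_special_tokens=True)
#   # without special tokens this is equivalent to:
#   ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))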
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for non-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
removing one token at a time from the longest sequence (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError.
This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast.
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True``
and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of overflowing tokens when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
# Throw an error if padding is requested but there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. "
"In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
prepend_batch_axis=return_tensors is not None,
)
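# A minimal usage sketch (hypothetical, assuming a pad token is defined):
#   enc = tokenizer.encode_plus("How are you?", "I am fine.",
#                               max_length=16, pad_to_max_length=True)
#   enc["input_ids"], enc["token_type_ids"], enc["attention_mask"]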
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_masks: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_masks: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`,
:obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`,
and, for non-fast tokenizers, also:
:obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded.
This can be a list of string/string-sequences/int-sequences or a list of pair of
string/string-sequences/int-sequence (see details in encode_plus)
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
- 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
removing one token at a time from the longest sequence (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
which can be set to the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Set to True to indicate the input is already tokenized
return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`model_input_names` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`_
return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`None`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`model_input_names` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return (char_start, char_end) for each token (default False).
If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set, the resulting dictionary will include the length of each encoded input
**kwargs: passed to the `self.tokenize()` method
Return:
A Dictionary of shape::
{
input_ids: list[List[int]],
token_type_ids: list[List[int]] if return_token_type_ids is True (default)
attention_mask: list[List[int]] if return_attention_mask is True (default)
overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[List[int]] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``attention_mask``: list of indices specifying which tokens should be attended to by the model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of tokens removed when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying regular sequence tokens.
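Examples (an illustrative sketch; assumes a tokenizer such as ``BertTokenizer`` can be loaded
with ``from_pretrained``; the exact ids depend on the vocabulary)::
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    batch = tokenizer.batch_encode_plus(
        ["Hello world!", "A slightly longer second sentence."],
        max_length=16,
        pad_to_max_length=True,
    )
    # batch["input_ids"] is a list of two id lists, each padded to length 16
    # batch["attention_mask"] marks real tokens with 1 and padding with 0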
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
# Throw an error if we cannot pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError(
"Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy"
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers."
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2 and not is_pretokenized:
ids, pair_ids = ids_or_pair_ids
else:
ids, pair_ids = ids_or_pair_ids, None
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
if max_length is None and pad_to_max_length:
def total_sequence_length(input_pairs):
first_ids, second_ids = input_pairs
return len(first_ids) + (
self.num_special_tokens_to_add()
if second_ids is None
else (len(second_ids) + self.num_special_tokens_to_add(pair=True))
)
max_length = max([total_sequence_length(ids) for ids in input_ids])
batch_outputs = {}
for first_ids, second_ids in input_ids:
# Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by
# the model. It adds special tokens, truncates sequences if overflowing while taking into account
# the special tokens and manages a window stride for overflowing tokens
outputs = self.prepare_for_model(
first_ids,
pair_ids=second_ids,
max_length=max_length,
pad_to_max_length=pad_to_max_length,
add_special_tokens=add_special_tokens,
stride=stride,
truncation_strategy=truncation_strategy,
return_attention_mask=return_attention_masks,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_masks,
return_lengths=return_lengths,
return_tensors=None, # We will convert the whole batch to tensors at the end
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
if return_tensors is not None:
convert_to_tensors(batch_outputs, return_tensors)
return BatchEncoding(batch_outputs)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
max_length: Optional[int] = None,
add_special_tokens: bool = True,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_lengths: bool = False,
prepend_batch_axis: bool = False,
) -> BatchEncoding:
""" Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful to remove edge effect when using sequential
list of inputs. The overflowing tokens will contain a part of the previous window of tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing one token at a time from the longest sequence (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
The tokenizer padding sides are handled by the following strings:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
Defaults to False: no padding.
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set, the resulting dictionary will include the length of each encoded input
prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set the resulting object will feature an extra dim at position 0.
This can be seen as an unsqueezing operator.
Return:
A Dictionary of shape::
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True
length: int if return_lengths is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``token_type_ids``: list of token type ids to be fed to a model
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
- ``num_truncated_tokens``: number of tokens removed when a ``max_length`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
tokens and 0 specifying regular sequence tokens.
- ``length``: this is the length of ``input_ids``
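Examples (an illustrative sketch; assumes a ``BertTokenizer`` loaded via ``from_pretrained``;
the exact ids depend on the vocabulary)::
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
    encoded = tokenizer.prepare_for_model(ids, max_length=8, pad_to_max_length=True)
    # encoded["input_ids"] holds [CLS] hello world [SEP] followed by [PAD] ids, 8 ids in total
    # encoded["attention_mask"] -> [1, 1, 1, 1, 0, 0, 0, 0]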
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
assert max_length is None or len(encoded_inputs["input_ids"]) <= max_length
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.model_max_length)
)
# Padding
needs_to_be_padded = pad_to_max_length and (
(max_length and len(encoded_inputs["input_ids"]) < max_length)
or (
max_length is None
and len(encoded_inputs["input_ids"]) < self.model_max_length
and self.model_max_length <= LARGE_INTEGER
)
)
if pad_to_max_length and max_length is None and self.model_max_length > LARGE_INTEGER:
logger.warning(
"Sequence can't be padded as no maximum length is specified and the model maximum length is too high."
)
if needs_to_be_padded:
difference = (max_length if max_length is not None else self.model_max_length) - len(
encoded_inputs["input_ids"]
)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if return_lengths:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
# Prepare model inputs as tensors if asked
if return_tensors is not None:
convert_to_tensors(encoded_inputs, return_tensors, prepend_batch_axis)
return BatchEncoding(encoded_inputs)
def prepare_for_tokenization(self, text: str, **kwargs) -> str:
""" Performs any necessary transformations before tokenization """
return text
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: str = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
""" Truncates a sequence pair in place to the maximum length.
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
number of tokens to remove using the truncation strategy
truncation_strategy: string selected in the following options:
- 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_length,
removing one token at a time from the longest sequence (when there is a pair of input sequences).
Overflowing tokens only contain overflow from the first sequence.
- 'only_first': Only truncate the first sequence. Raises an error if the first sequence is shorter than or equal in length to num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
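Example (illustrative, using plain integer ids; assumes ``tokenizer`` is any instance of this class)::
    ids, pair_ids, overflowing = tokenizer.truncate_sequences(
        [1, 2, 3, 4, 5], num_tokens_to_remove=2, truncation_strategy="only_first"
    )
    # ids -> [1, 2, 3], pair_ids -> None, overflowing -> [4, 5]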
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == "longest_first":
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == "only_first":
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == "only_second":
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == "do_not_truncate":
raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List[int]:
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens. This implementation does not add special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formatted with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return " ".join(self.convert_ids_to_tokens(tokens))
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
Converts a sequence of ids (integers) to a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
skip_special_tokens: if set to True, will remove special tokens from the output.
clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
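Examples (an illustrative sketch; assumes a ``BertTokenizer`` loaded via ``from_pretrained``)::
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    ids = tokenizer.encode("Hello, world!")
    tokenizer.decode(ids, skip_special_tokens=True)
    # -> "hello, world!" (the uncased vocabulary lowercases the input)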
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_tokens:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = " ".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def batch_decode(self, sequences: List[List[int]], **kwargs) -> List[str]:
return [self.decode(seq, **kwargs) for seq in sequences]
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string
class PreTrainedTokenizerFast(PreTrainedTokenizer):
""" Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherit from PreTrainedTokenizer.
Handle all the shared methods for tokenization and special tokens as well as methods
downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
`short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
model has no maximum input size.
- ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
pretrained models, and as associated values, a dictionary of specific arguments to pass to the
``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the
``from_pretrained()`` method.
Args:
- ``tokenizer`` (`BaseTokenizerFast`): A Fast tokenizer from the HuggingFace tokenizer library (in low level Rust language)
- ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated max_length can be
found in ``max_model_input_sizes``, it will default to VERY_LARGE_INTEGER (`int(1e30)`).
- ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
Should be selected between ['right', 'left']
- ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
model ("token_type_ids", "attention_mask"...).
- ``bos_token``: (`Optional`) string: a beginning of sentence token.
Will be associated to ``self.bos_token`` and ``self.bos_token_id``
- ``eos_token``: (`Optional`) string: an end of sentence token.
Will be associated to ``self.eos_token`` and ``self.eos_token_id``
- ``unk_token``: (`Optional`) string: an unknown token.
Will be associated to ``self.unk_token`` and ``self.unk_token_id``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
Will be associated to ``self.sep_token`` and ``self.sep_token_id``
- ``pad_token``: (`Optional`) string: a padding token.
Will be associated to ``self.pad_token`` and ``self.pad_token_id``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
leveraging self-attention along the full depth of the model).
Will be associated to ``self.cls_token`` and ``self.cls_token_id``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
Adding all special tokens here ensure they won't be split by the tokenization process.
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
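Example (an illustrative sketch; in practice a concrete subclass such as ``BertTokenizerFast`` is
usually obtained through ``from_pretrained`` rather than built by hand around a ``BaseTokenizerFast``)::
    fast_tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
    encoding = fast_tokenizer.encode_plus("Hello world!", return_offsets_mapping=True)
    # encoding["offset_mapping"] holds (char_start, char_end) pairs, a feature only available
    # on the fast (Rust-backed) tokenizers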
"""
def __init__(self, tokenizer: BaseTokenizerFast, **kwargs):
if not isinstance(tokenizer, BaseTokenizerFast):
raise ValueError(
"Tokenizer should be an instance of a Tokenizer " "provided by HuggingFace tokenizers library."
)
self._tokenizer: BaseTokenizerFast = tokenizer
# Initialize all the rest of the kwargs
super().__init__(**kwargs)
@property
def backend_tokenizer(self) -> BaseTokenizerFast:
return self._tokenizer
@property
def decoder(self) -> DecoderFast:
return self._tokenizer._tokenizer.decoder
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=False)
def __len__(self) -> int:
return self._tokenizer.get_vocab_size(with_added_tokens=True)
def _maybe_update_backend(self, value):
""" Update the backend fast tokenizer.
Override method from base class SpecialTokensMixin """
self._tokenizer.add_special_tokens(value)
def _convert_encoding(
self,
encoding: EncodingFast,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
) -> Dict[str, Any]:
""" Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.
Overflowing tokens are converted to additional examples (like batches) so the output values of
the dict are lists (overflows) of lists (tokens).
If return_tensors is not None, these lists of lists are converted to 2-D tensors
for input_ids, token_type_ids and attention_mask.
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_tensors is not None:
encoding_dict = convert_to_tensors(encoding_dict, return_tensors)
return encoding_dict
def _convert_token_to_id_with_added_voc(self, token: str) -> int:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
def _convert_id_to_token(self, index: int) -> Optional[str]:
return self._tokenizer.id_to_token(int(index))
def get_vocab(self):
return self._tokenizer.get_vocab(True)
def convert_tokens_to_string(self, tokens: List[int], skip_special_tokens: bool = False) -> str:
return self._tokenizer.decode(tokens, skip_special_tokens)
def add_tokens(self, new_tokens: List[Union[str, AddedTokenFast]]) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: string or list of strings or AddedTokenFast. Each string is a token to add.
Tokens are only added if they are not already in the vocabulary. AddedTokenFast wraps a string token to let you personalize its behavior (whether this token should only match against a single word, whether it should strip all potential whitespace on the left side, whether it should strip all potential whitespace on the right side, ...).
See details for AddedToken in HuggingFace tokenizers library.
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if isinstance(new_tokens, str):
new_tokens = [new_tokens]
return self._tokenizer.add_tokens(new_tokens)
def add_special_tokens(self, special_tokens_dict: dict) -> int:
# Map special tokens to class attributes (self.pad_token...)
super().add_special_tokens(special_tokens_dict)
# With a fast backend tokenizer, the only specificities of special tokens are that
# - they will never be processed by the model, and
# - they will be removed while decoding.
# But they are not mapped to special attributes in the backend so we can just
# send a list.
tokens = []
for token in special_tokens_dict.values():
if isinstance(token, list):
tokens += token
else:
tokens += [token]
num_added_tokens = self._tokenizer.add_special_tokens(tokens)
return num_added_tokens
def num_special_tokens_to_add(self, pair: bool = False) -> int:
return self._tokenizer.num_special_tokens_to_add(pair)
def tokenize(
self, text: TextInput, pair: Optional[TextInput] = None, add_special_tokens: bool = False
) -> List[str]:
return self._tokenizer.encode(text, pair, add_special_tokens).tokens
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
],
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
truncation_strategy: str = "longest_first",
pad_to_max_length: bool = False,
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_lengths: bool = False,
**kwargs
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise ValueError(
"batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
)
# Needed if we have to return a tensor
pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1)
# Throw an error if we cannot pad because there is no padding token
if pad_to_max_length and self.pad_token_id is None:
raise ValueError("Unable to set proper padding strategy as the tokenizer does not have a padding token")
# Set the truncation and padding strategy and restore the initial configuration
with truncate_and_pad(
tokenizer=self._tokenizer,
max_length=max_length,
stride=stride,
strategy=truncation_strategy,
pad_to_max_length=pad_to_max_length,
padding_side=self.padding_side,
pad_token_id=self.pad_token_id if self._pad_token is not None else None,
pad_token_type_id=self.pad_token_type_id,
pad_token=self._pad_token,
):
# Check for the pretokenized path
if is_pretokenized:
encodings = []
# Iterate over each sample (we don't know yet if they are pairs or single inputs)
for i, sample in enumerate(batch_text_or_text_pairs):
if not isinstance(sample, (list, tuple)):
raise TypeError(
"batch_encode_plus(..., is_pretokenized=True) requires batch_text_or_text_pairs "
"to be either List[List[str]] or List[Tuple[List[str], List[str]]] but sample at "
"index {} is of type {}".format(i, type(sample))
)
# Test if we have a pair of sentences by checking the depth of nesting
is_pair = bool(len(sample) > 0 and isinstance(sample[0], (list, tuple)))
# Take care of the first sequence - we multi-thread over the words
encodings_text = EncodingFast.merge(
self._tokenizer.encode_batch(sample[0] if is_pair else sample, add_special_tokens=False),
growing_offsets=True,
)
# Take care of the second sequence if we have a pair
if is_pair:
encodings_pair = EncodingFast.merge(
self._tokenizer.encode_batch([("", s) for s in sample[1]], add_special_tokens=False),
growing_offsets=True,
)
else:
encodings_pair = None
# Post-process - truncate/pad and add special tokens
encoding = self._tokenizer.post_process(encodings_text, encodings_pair, add_special_tokens)
encodings.append(encoding)
# Classical path with strings input
else:
# Avoid thread overhead if only one example.
if len(batch_text_or_text_pairs) == 1:
if isinstance(batch_text_or_text_pairs[0], (tuple, list)):
encodings = self._tokenizer.encode(
*batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
else:
encodings = self._tokenizer.encode(
batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens
)
encodings = [encodings]
else:
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs, add_special_tokens=add_special_tokens
)
# Convert encoding to dict
# `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens = [
self._convert_encoding(
encoding=encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
)
for encoding in encodings
]
# Sanitize the output to have dict[list] from list[dict]
sanitized = {}
for key in tokens[0].keys():
# To List[List[List[int]]] of shape (batch, overflows, sequence length)
stack = [e for item in tokens for e in item[key]]
if return_tensors == "tf":
stack = tf.stack(stack, axis=0)
elif return_tensors == "pt":
stack = torch.stack(stack, dim=0)
# elif not return_tensors and len(stack) == 1:
# stack = stack[0]
sanitized[key] = stack
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = flatten([[i] * len(enc["input_ids"]) for i, enc in enumerate(tokens)])
sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping
return BatchEncoding(sanitized, encodings)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
pad_to_max_length: bool = False,
stride: int = 0,
truncation_strategy: str = "longest_first",
is_pretokenized: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
**kwargs
) -> BatchEncoding:
# Check for the pretokenized path (i.e. [token1, token2, ..., tokenN] -> [id1, id2, ..., idN])
if is_pretokenized:
if isinstance(text, list) and len(text) > 0:
# Encode through encode_batch with sequence of only one word which will be merged after hand
encoding = self._tokenizer.encode_batch(text, add_special_tokens=False)
encoding = EncodingFast.merge(encoding, growing_offsets=True)
# Let's do the same for pairs if provided
if isinstance(text_pair, list):
# We prepend empty string before each word so that encoding is aware content is a pair
encoding_pair = self._tokenizer.encode_batch(
[("", p) for p in text_pair], add_special_tokens=False
)
encoding_pair = EncodingFast.merge(encoding_pair, growing_offsets=True)
elif text_pair is None:
encoding_pair = None
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text and text_pair to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
# Post process and if asked to do so, insert special tokens where needed
encoding = self._tokenizer.post_process(encoding, encoding_pair, add_special_tokens)
batched_output = BatchEncoding(
self._convert_encoding(
encoding,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
),
encoding,
)
else:
raise TypeError(
"encode_plus(..., is_pretokenized=True) requires text to be List[str] "
"but got (text={}, text_pair={})".format(type(text), type(text_pair))
)
else:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_output = self.batch_encode_plus(
batched_input,
add_special_tokens=add_special_tokens,
max_length=max_length,
stride=stride,
truncation_strategy=truncation_strategy,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
pad_to_max_length=pad_to_max_length,
**kwargs,
)
# If return_tensors is None, we can remove the leading batch axis
if not return_tensors:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
return batched_output
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
text = self._tokenizer.decode(token_ids, skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str) -> Tuple[str]:
if os.path.isdir(save_directory):
files = self._tokenizer.save(save_directory)
else:
folder, file = os.path.split(os.path.abspath(save_directory))
files = self._tokenizer.save(folder, name=file)
return tuple(files)
def trim_batch(
input_ids, pad_token_id, attention_mask=None,
):
"""Remove columns that are populated exclusively by pad_token_id"""
keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
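# Example usage of `trim_batch` (an illustrative sketch, assuming a pad token id of 0):
#
#     batch = torch.tensor([[101, 7592, 102, 0, 0],
#                           [101, 2088, 999, 102, 0]])
#     trimmed = trim_batch(batch, pad_token_id=0)
#     # trimmed has shape (2, 4): the last column contained only padding and was dropped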
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
"bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt",
"TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt",
"wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(PreTrainedTokenizer):
r"""
Constructs a BERT tokenizer. Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`string`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase the input when tokenizing.
do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to do basic tokenization before WordPiece.
never_split (:obj:`Iterable`, `optional`, defaults to :obj:`None`):
Collection of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
unk_token (:obj:`string`, `optional`, defaults to "[UNK]"):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`string`, `optional`, defaults to "[SEP]"):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering.
It is also used as the last token of a sequence built with special tokens.
pad_token (:obj:`string`, `optional`, defaults to "[PAD]"):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`string`, `optional`, defaults to "[CLS]"):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
mask_token (:obj:`string`, `optional`, defaults to "[MASK]"):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/transformers/issues/328
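Examples (an illustrative sketch; the exact wordpieces and ids depend on the vocabulary file)::
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.tokenize("Hello, world!")
    # -> ['hello', ',', 'world', '!']
    tokenizer.encode("Hello, world!")
    # -> a list of ids starting with the [CLS] id and ending with the [SEP] id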
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
**kwargs
):
super().__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
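Example (illustrative; assumes 101 and 102 are the ``[CLS]`` and ``[SEP]`` ids, as in the
uncased English vocabulary, and the other integer ids are placeholders)::
    tokenizer.build_inputs_with_special_tokens([7592, 2088])
    # -> [101, 7592, 2088, 102]
    tokenizer.build_inputs_with_special_tokens([7592], [2088])
    # -> [101, 7592, 102, 2088, 102]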
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True if the token list is already formatted with special tokens for the model
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
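Example (illustrative; the integer ids are placeholders)::
    tokenizer.get_special_tokens_mask([7592, 2088], [2003, 2204])
    # -> [1, 0, 0, 1, 0, 0, 1]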
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
if token_ids_1 is None, only returns the first portion of the mask (0's).
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
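Example (illustrative; the integer ids are placeholders)::
    tokenizer.create_token_type_ids_from_sequences([7592, 2088], [2003, 2204])
    # -> [0, 0, 0, 0, 1, 1, 1]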
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, vocab_path):
"""
Save the tokenizer's WordPiece vocabulary to a directory or file.
Args:
vocab_path (:obj:`str`):
The directory in which to save the vocabulary.
Returns:
:obj:`Tuple(str)`: Paths to the files saved.
"""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = vocab_path
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file)
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of tokens not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of tokens not to split.
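Example (illustrative)::
    BasicTokenizer(do_lower_case=True).tokenize("Hello, WORLD!")
    # -> ['hello', ',', 'world', '!']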
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because the English Wikipedia does contain
# some Chinese words).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
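# Illustrative usage sketch (added for clarity; not part of the original file).
# With a toy vocabulary, the greedy longest-match-first behaviour of
# WordpieceTokenizer looks like this:
#
#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
#   wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
#   wp.tokenize("unaffable")    # -> ["un", "##aff", "##able"]
#   wp.tokenize("unknownword")  # -> ["[UNK]"], since no full greedy split exists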
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
class BertTokenizerFast(PreTrainedTokenizerFast):
r"""
Constructs a "Fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library).
    Bert tokenization is based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the methods. Users
should refer to the superclass for more information regarding methods.
Args:
vocab_file (:obj:`string`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase the input when tokenizing.
unk_token (:obj:`string`, `optional`, defaults to "[UNK]"):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`string`, `optional`, defaults to "[SEP]"):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering.
It is also used as the last token of a sequence built with special tokens.
pad_token (:obj:`string`, `optional`, defaults to "[PAD]"):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`string`, `optional`, defaults to "[CLS]"):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
mask_token (:obj:`string`, `optional`, defaults to "[MASK]"):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/transformers/issues/328
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean the text before tokenization by removing any control characters and
replacing all whitespaces by the classic one.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
clean_text=True,
tokenize_chinese_chars=True,
strip_accents=True,
wordpieces_prefix="##",
**kwargs
):
super().__init__(
BertWordPieceTokenizer(
vocab_file=vocab_file,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
clean_text=clean_text,
handle_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
lowercase=do_lower_case,
wordpieces_prefix=wordpieces_prefix,
),
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
if token_ids_1 is None, only returns the first portion of the mask (0's).
Args:
token_ids_0 (:obj:`List[int]`):
List of ids.
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
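# Illustrative sketch (added for clarity; not part of the original file). The ids
# below are arbitrary placeholders rather than entries from a real vocabulary;
# they only show the layout produced by build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences for a sequence pair:
#
#   token_ids_0 = [11, 12]        # sequence A
#   token_ids_1 = [21, 22, 23]    # sequence B
#   input_ids      = [cls_id, 11, 12, sep_id, 21, 22, 23, sep_id]
#   token_type_ids = [0,      0,  0,  0,      1,  1,  1,  1]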
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | dims5 | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_DIMS5_H
#define TT2I_DIMS5_H
#include "NvInfer.h"
namespace tts
{
class Dims5 : public nvinfer1::Dims
{
public:
using DimensionType = nvinfer1::DimensionType;
/**
* @brief Create a new five dimensional Dims struct.
*
* @param n The first dimension.
* @param m The second dimension.
* @param c The third dimension.
* @param h The fourth dimension.
* @param w The fifth dimension.
*/
Dims5(const int n, const int m, const int c, const int h, const int w)
: Dims()
{
nbDims = 5;
d[0] = n;
d[1] = m;
d[2] = c;
d[3] = h;
d[4] = w;
}
};
} // namespace tts
#endif
|
TensorFlow2/LanguageModeling/ELECTRA | ELECTRA | run_inference | # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import time
import argparse
import json
import logging
import collections
import tensorflow as tf
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
from configuration import ElectraConfig
from modeling import TFElectraForQuestionAnswering
from tokenization import ElectraTokenizer
from squad_utils import SquadResult, RawResult, _get_best_indices
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/electra-small-generator",
"google/electra-base-generator",
"google/electra-large-generator",
"google/electra-small-discriminator",
"google/electra-base-discriminator",
"google/electra-large-discriminator",
# See all ELECTRA models at https://huggingface.co/models?filter=electra
]
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--electra_model", default=None, type=str, required=True,
help="Model selected in the list: " + ", ".join(TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST))
parser.add_argument("--init_checkpoint",
default=None,
type=str,
required=True,
help="The checkpoint file from pretraining")
parser.add_argument("--question",
default=None,
type=str,
required=True,
help="Question")
parser.add_argument("--context",
default=None,
type=str,
required=True,
help="Context")
parser.add_argument(
"--joint_head",
default=True,
type=bool,
help="Jointly predict the start and end positions",
)
parser.add_argument(
"--beam_size",
default=4,
type=int,
help="Beam size when doing joint predictions",
)
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument('--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold',
type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
args = parser.parse_args()
return args
def get_predictions_joint_head(start_indices, end_indices, result, max_len, args):
predictions = []
for i in range(args.beam_size):
start_index = start_indices[i]
for j in range(args.beam_size):
# for end_index in end_indices:
end_index = end_indices[i * args.beam_size + j]
if start_index >= max_len:
continue
if end_index >= max_len:
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[i],
end_logit=result.end_logits[i * args.beam_size + j]))
return predictions
def get_predictions(start_indices, end_indices, result, max_len, args):
predictions = []
for start_index in start_indices:
for end_index in end_indices:
if start_index >= max_len:
continue
if end_index >= max_len:
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
return predictions
def main():
args = parse_args()
print("***** Loading tokenizer and model *****")
electra_model = args.electra_model
config = ElectraConfig.from_pretrained(electra_model)
tokenizer = ElectraTokenizer.from_pretrained(electra_model)
model = TFElectraForQuestionAnswering.from_pretrained(electra_model, config=config, args=args)
print("***** Loading fine-tuned checkpoint: {} *****".format(args.init_checkpoint))
model.load_weights(args.init_checkpoint, by_name=False, skip_mismatch=False).expect_partial()
question, text = args.question, args.context
encoding = tokenizer.encode_plus(question, text, return_tensors='tf')
input_ids, token_type_ids, attention_mask = encoding["input_ids"], encoding["token_type_ids"], \
encoding["attention_mask"]
all_tokens = tokenizer.convert_ids_to_tokens(input_ids.numpy()[0])
if not args.joint_head:
start_logits, end_logits = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)[:2]
start_logits = start_logits[0].numpy().tolist()
end_logits = end_logits[0].numpy().tolist()
result = RawResult(unique_id=0,
start_logits=start_logits,
end_logits=end_logits)
start_indices = _get_best_indices(result.start_logits, args.n_best_size)
end_indices = _get_best_indices(result.end_logits, args.n_best_size)
predictions = get_predictions(start_indices, end_indices, result, len(all_tokens), args)
null_score = result.start_logits[0] + result.end_logits[0]
else:
outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
output = [output[0].numpy().tolist() for output in outputs]
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
0,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
predictions = get_predictions_joint_head(result.start_top_index, result.end_top_index, result, len(all_tokens), args)
null_score = result.cls_logits
predictions = sorted(predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
answer = predictions[0]
answer = ' '.join(all_tokens[answer.start_index: answer.end_index + 1])
if args.null_score_diff_threshold > null_score and args.version_2_with_negative:
answer = ''
print(answer)
return answer
if __name__ == "__main__":
main()
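# Illustrative invocation sketch (added for clarity; not part of the original file).
# The checkpoint path is a hypothetical placeholder:
#
#   python run_inference.py \
#       --electra_model google/electra-base-discriminator \
#       --init_checkpoint checkpoints/electra_base_qa.ckpt \
#       --question "What framework is the model implemented in?" \
#       --context "The model is implemented in TensorFlow 2."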
|
PyTorch/LanguageModeling/BERT | BERT | requirements | # progress bars in model download and training scripts
tqdm
# Accessing files from S3 directly.
boto3
# Used for downloading models over HTTP
requests
six
ipdb
#Data processing
h5py
nltk
#Others
onnxruntime
git+https://github.com/NVIDIA/dllogger
|
Tools/PyTorch/TimeSeriesPredictionPlatform/data | data | xgb_util | # Copyright 2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import os
def select_per_group(df, start, end):
'''
    Groups the dataframe by _id_ and takes the slice [start:end] from each group. The resulting
    list of slices is concatenated into a single dataframe.
'''
result = []
for _, g in df.groupby("_id_"):
result.append(g[start:end])
return pd.concat((result))
def select_test_group(df, encoder, example):
'''
    Creates the dataframe to pass to XGBoost predict. After grouping by
    _id_, elements are selected from each group such that all complete time-series are chosen.
'''
final = []
for _, g in df.groupby("_id_"):
final.append(g[encoder-1: encoder + len(g) - example])
return pd.concat((final))
def load_xgb_df(dest_path, features, ds_type):
'''
Loads and does some light preprocessing on the train, valid and test.
    First the CSV for the split is read, then features not present in the feature spec are dropped,
    and finally features whose datatype is object are dropped. The final step prevents issues with
    XGBoost training and cuDF casting.
'''
path = dest_path
if not isinstance(path, pd.DataFrame):
df = pd.read_csv(os.path.join(path, f"{ds_type}.csv"))
else:
df = path
all_features = [f.name for f in features] + ['_id_']
all_read = df.columns
to_drop = [c for c in all_read if c not in all_features]
df.drop(columns=to_drop, inplace=True)
object_columns = [c for c, d in zip(df.columns, df.dtypes) if d == "object"]
df.drop(columns=object_columns, inplace=True)
return df
def xgb_multiID_preprocess(df, features, time_series_count):
date = [feature.name for feature in features if feature.feature_type == "TIME"][0]
target = [feature.name for feature in features if feature.feature_type == "TARGET"][0]
time_series_count = time_series_count
target_values = []
for _, g in df.groupby("_id_"):
target_values.append(g[[date, target]])
final = target_values[0]
final.rename(columns={target: f'{target}_{0}'}, inplace=True)
for i in range(1, time_series_count):
target_values[i].rename(columns={target: f'{target}_{i}'}, inplace=True)
final = final.merge(target_values[i], on=date, how='outer')
df = df.merge(final, on=date, how='outer')
return df
def feat_adder(df, lag_feats, rolling_feats):
'''
    Main data preprocessing function for XGBoost. lag_feats and rolling_feats are both
    dictionaries mapping feature names to lists of window sizes. After grouping by the _id_,
    each listed lag feature is shifted down by i steps in the history, and for each listed
    rolling feature the rolling sum of the past i time steps is added as a new feature.
    The new lag features are named {feature_name}_{i}_lag and the new rolling features
    {feature_name}_{i}_rolling.
'''
final = []
for _, g in df.groupby("_id_"):
for f, v in lag_feats.items():
for i in v:
g['{}_{}_lag'.format(f, i)] = g[f].shift(i)
for f, v in rolling_feats.items():
for i in v:
g['{}_{}_rolling'.format(f, i)] = g[f].rolling(i).sum()
final.append(g)
return pd.concat((final))
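# Illustrative sketch (added for clarity; not part of the original file).
# For a single toy series, feat_adder produces shifted lag columns and rolling
# sums named {feature}_{i}_lag / {feature}_{i}_rolling:
#
#   df = pd.DataFrame({"_id_": [0, 0, 0, 0], "y": [1.0, 2.0, 3.0, 4.0]})
#   out = feat_adder(df, lag_feats={"y": [1]}, rolling_feats={"y": [2]})
#   # out["y_1_lag"]     -> [NaN, 1.0, 2.0, 3.0]
#   # out["y_2_rolling"] -> [NaN, 3.0, 5.0, 7.0]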
def data_label_split(df, target):
'''
Drops rows with NaN as the target value. In addition separates the labels from
the data by doing an inplace drop.
'''
df.dropna(subset=target, inplace=True)
labels = df[target]
df.drop(target, 1, inplace=True)
return labels
def target_shift(df, target, feat, i):
'''
Brings features up that are (i+1) time steps in the future. Currently these features are
the target and the known/static variables. This future target is the value that will be predicted in the trainer.
These features have an _target added to their name.
'''
in_feat = target + feat
out_feat = [f'{i}_target' for i in in_feat]
df[out_feat] = df.groupby("_id_")[in_feat].shift(-1 * (i+1))
return df |
TensorFlow/LanguageModeling/BERT/data | data | WikicorpusTextFormatting | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
class WikicorpusTextFormatting:
def __init__(self, wiki_path, output_filename, recursive = False):
self.wiki_path = wiki_path
self.recursive = recursive
self.output_filename = output_filename
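    # Illustrative usage sketch (added for clarity; not part of the original file).
    # Paths are hypothetical placeholders:
    #
    #   formatter = WikicorpusTextFormatting(wiki_path='extracted/wikicorpus_en',
    #                                        output_filename='wikicorpus_en.txt',
    #                                        recursive=True)
    #   formatter.merge()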
# This puts one article per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
print(filename)
article_lines = []
article_open = False
with open(filename, mode='r', newline='\n') as file:
for line in file:
if '<doc id=' in line:
article_open = True
elif '</doc>' in line:
article_open = False
for oline in article_lines[1:]:
if oline != '\n':
ofile.write(oline.rstrip() + " ")
ofile.write("\n\n")
article_lines = []
else:
if article_open:
article_lines.append(line) |
PyTorch/Classification/GPUNet/triton/runner/maintainer | maintainer | maintainer | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
volumes: Volumes mapping
environment: Environment variables set in container
            log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
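# Minimal sketch of a concrete subclass (added for clarity; not part of the
# original file). DockerMaintainer and DockerContainer are hypothetical names:
#
#   class DockerMaintainer(Maintainer):
#       def triton_container(self, command, image, devices, volumes, environment, log_file):
#           return DockerContainer(command=command, image=image, devices=devices,
#                                  volumes=volumes, environment=environment,
#                                  log_file=log_file)
#
#       def build_image(self, *, image_file_path, image_name, workdir_path=None,
#                       build_args=None):
#           ...  # e.g. run `docker build` with the given Dockerfile and build args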
|
PyTorch/Classification/GPUNet/triton/scripts/docker | docker | interactive | #!/usr/bin/env bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATASET_PATH=${1:-"/data/"}
NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=0}
docker run -it --rm \
--runtime=nvidia \
-e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
--ipc=host \
-e WORKDIR="$(pwd)" \
-e PYTHONPATH="$(pwd)" \
-v "$(pwd)":"$(pwd)" \
-v "$(pwd)":/workspace/gpunet/ \
-v ${DATASET_PATH}:"$(pwd)"/datasets/imagenet/ \
-v /var/run/docker.sock:/var/run/docker.sock \
-w "$(pwd)" \
gpunet:latest bash
|
TensorFlow2/Detection/Efficientdet/efficientnet/blocks | blocks | conv2d_block | import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple
from model import normalization_builder
__all__ = ['conv2d_block']
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_in',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
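# Illustrative usage sketch for conv2d_block (defined below); added for clarity,
# not part of the original file. The config keys follow those read inside the
# function body (bn_momentum, bn_epsilon, weight_decay, weight_init):
#
#   config = {'bn_momentum': 0.99, 'bn_epsilon': 1e-3,
#             'weight_decay': 1e-5, 'weight_init': 'fan_in'}
#   inputs = tf.keras.Input(shape=(224, 224, 3))
#   x = conv2d_block(inputs, conv_filters=32, config=config,
#                    kernel_size=(3, 3), strides=(2, 2),
#                    activation='swish', name='stem')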
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: dict,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
name: Text = None):
"""A conv2d followed by batch norm and an activation."""
batch_norm = normalization_builder.batch_norm_class()
bn_momentum = config['bn_momentum']
bn_epsilon = config['bn_epsilon']
data_format = tf.keras.backend.image_data_format()
weight_decay = config['weight_decay']
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
}
CONV_KERNEL_INITIALIZER['config']['mode'] = config['weight_init']
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({'filters': conv_filters,
'kernel_initializer': CONV_KERNEL_INITIALIZER})
x = conv2d(**init_kwargs)(inputs)
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
x = batch_norm(axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn')(x)
if activation is not None:
x = tf.keras.layers.Activation(activation,
name=name + '_activation')(x)
return x |
TensorFlow/Segmentation/VNet | VNet | export | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import tensorflow as tf
from utils.data_loader import MSDDataset
from utils.model_fn import vnet_v2
from utils.tf_export import to_savedmodel, to_tf_trt, to_onnx
PARSER = argparse.ArgumentParser(description="V-Net")
PARSER.add_argument('--to', dest='to', choices=['savedmodel', 'tftrt', 'onnx'], required=True)
PARSER.add_argument('--use_amp', dest='use_amp', action='store_true', default=False)
PARSER.add_argument('--use_xla', dest='use_xla', action='store_true', default=False)
PARSER.add_argument('--compress', dest='compress', action='store_true', default=False)
PARSER.add_argument('--input_shape',
nargs='+',
type=int,
help="""Model's input shape""")
PARSER.add_argument('--data_dir',
type=str,
help="""Directory where the dataset is located""")
PARSER.add_argument('--checkpoint_dir',
type=str,
help="""Directory where the checkpoint is located""")
PARSER.add_argument('--savedmodel_dir',
type=str,
help="""Directory where the savedModel is located""")
PARSER.add_argument('--precision',
type=str,
choices=['FP32', 'FP16', 'INT8'],
help="""Precision for the model""")
def main():
"""
Starting point of the application
"""
flags = PARSER.parse_args()
if flags.to == 'savedmodel':
params = {
'labels': ['0', '1', '2'],
'batch_size': 1,
'input_shape': flags.input_shape,
'convolution_size': 3,
'downscale_blocks': [3, 3, 3],
'upscale_blocks': [3, 3],
'upsampling': 'transposed_conv',
'pooling': 'conv_pool',
'normalization_layer': 'batchnorm',
'activation': 'relu'
}
to_savedmodel(input_shape=flags.input_shape,
model_fn=vnet_v2,
checkpoint_dir=flags.checkpoint_dir,
output_dir='./saved_model',
input_names=['IteratorGetNext'],
output_names=['vnet/loss/total_loss_ref'],
use_amp=flags.use_amp,
use_xla=flags.use_xla,
compress=flags.compress,
params=argparse.Namespace(**params))
if flags.to == 'tftrt':
ds = MSDDataset(json_path=flags.data_dir + "/dataset.json",
interpolator='linear')
iterator = ds.test_fn(count=1).make_one_shot_iterator()
features = iterator.get_next()
sess = tf.Session()
def input_data():
return {'input_tensor:0': sess.run(features)}
to_tf_trt(savedmodel_dir=flags.savedmodel_dir,
output_dir='./tf_trt_model',
precision=flags.precision,
feed_dict_fn=input_data,
num_runs=1,
output_tensor_names=['vnet/Softmax:0'],
compress=flags.compress)
if flags.to == 'onnx':
raise NotImplementedError('Currently ONNX not supported for 3D models')
if __name__ == '__main__':
    main()
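# Illustrative invocation sketch (added for clarity; not part of the original file).
# The checkpoint directory and input shape are hypothetical placeholders:
#
#   python export.py --to savedmodel \
#       --input_shape 32 32 32 4 \
#       --checkpoint_dir /results/checkpoint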
|
PyTorch/SpeechRecognition/Jasper/triton/scripts | scripts | run_client | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPT_DIR=$(cd $(dirname $0); pwd)
PROJECT_DIR=${SCRIPT_DIR}/../..
MODEL_TYPE=${1:-"ts-trace"}
DATA_DIR=${2} # folder with data
FILE=${3} # json manifest file, OR single wav file
JASPER_CONTAINER_TAG=${JASPER_CONTAINER_TAG:-jasper:triton}
if [ "$#" -ge 1 ] && [ "${FILE: -4}" == ".wav" ]; then
CMD="python /jasper/triton/jasper-client.py --data_dir /data --audio_filename ${FILE} --model_platform ${MODEL_TYPE}"
ARGS=""
ARGS="$ARGS -v $DATA_DIR:/data"
elif [ "$#" -ge 1 ] && [ "${FILE: -4}" == "json" ]; then
ARGS=""
ARGS="$ARGS -v $DATA_DIR:/data"
CMD="python /jasper/triton/jasper-client.py --manifest_filename ${FILE} --model_platform ${MODEL_TYPE} --data_dir /data"
else
ARGS="-it"
CMD=""
fi
echo "========== STARTING ${JASPER_CONTAINER_TAG} =========="
set -x
nvidia-docker run --rm -it \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-v ${PROJECT_DIR}:/jasper \
--name=jasper-triton-client \
${ARGS} ${JASPER_CONTAINER_TAG} ${CMD}
set +x
|
TensorFlow/Segmentation/UNet_Industrial | UNet_Industrial | requirements | git+https://github.com/NVIDIA/dllogger#egg=dllogger |
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils | utils | logger | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import sys
def setup_logger(name, save_dir, distributed_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, "log.txt"))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += "Training Iteration: {} ".format(step[0])
if len(step) > 1:
s += "Training Epoch: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
return s
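# Illustrative usage sketch (added for clarity; not part of the original file):
#
#   logger = setup_logger("maskrcnn_benchmark", save_dir="/results", distributed_rank=0)
#   logger.info(format_step((100, 2)) + "loss: 0.42")
#   # -> "Training Iteration: 100 Training Epoch: 2 loss: 0.42"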
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | export_tflite_ssd_graph_lib | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an SSD detection model to use with tf-lite.
See export_tflite_ssd_graph.py for usage.
"""
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.tools.graph_transforms import TransformGraph
from object_detection import exporter
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import post_processing_builder
from object_detection.core import box_list
_DEFAULT_NUM_CHANNELS = 3
_DEFAULT_NUM_COORD_BOX = 4
def get_const_center_size_encoded_anchors(anchors):
"""Exports center-size encoded anchors as a constant tensor.
Args:
anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor
boxes
Returns:
encoded_anchors: a float32 constant tensor of shape [num_anchors, 4]
containing the anchor boxes.
"""
anchor_boxlist = box_list.BoxList(anchors)
y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes()
num_anchors = y.get_shape().as_list()
with tf.Session() as sess:
y_out, x_out, h_out, w_out = sess.run([y, x, h, w])
encoded_anchors = tf.constant(
np.transpose(np.stack((y_out, x_out, h_out, w_out))),
dtype=tf.float32,
shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX],
name='anchors')
return encoded_anchors
def append_postprocessing_op(frozen_graph_def,
max_detections,
max_classes_per_detection,
nms_score_threshold,
nms_iou_threshold,
num_classes,
scale_values,
detections_per_class=100,
use_regular_nms=False):
"""Appends postprocessing custom op.
Args:
frozen_graph_def: Frozen GraphDef for SSD model after freezing the
checkpoint
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
nms_score_threshold: Score threshold used in Non-maximal suppression in
post-processing
nms_iou_threshold: Intersection-over-union threshold used in Non-maximal
suppression in post-processing
num_classes: number of classes in SSD detector
    scale_values: a dict with the following key-value pairs
      {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5}, used to decode
      center-size boxes
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead
of Fast NMS.
Returns:
transformed_graph_def: Frozen GraphDef with postprocessing custom op
appended
TFLite_Detection_PostProcess custom op node has four outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations
detection_classes: a float32 tensor of shape [1, num_boxes]
with class indices
detection_scores: a float32 tensor of shape [1, num_boxes]
with class scores
num_boxes: a float32 tensor of size 1 containing the number of detected
boxes
"""
new_output = frozen_graph_def.node.add()
new_output.op = 'TFLite_Detection_PostProcess'
new_output.name = 'TFLite_Detection_PostProcess'
new_output.attr['_output_quantized'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['_output_types'].list.type.extend([
types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT,
types_pb2.DT_FLOAT
])
new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom(
attr_value_pb2.AttrValue(b=True))
new_output.attr['max_detections'].CopyFrom(
attr_value_pb2.AttrValue(i=max_detections))
new_output.attr['max_classes_per_detection'].CopyFrom(
attr_value_pb2.AttrValue(i=max_classes_per_detection))
new_output.attr['nms_score_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
new_output.attr['nms_iou_threshold'].CopyFrom(
attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
new_output.attr['num_classes'].CopyFrom(
attr_value_pb2.AttrValue(i=num_classes))
new_output.attr['y_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
new_output.attr['x_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
new_output.attr['h_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
new_output.attr['w_scale'].CopyFrom(
attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))
new_output.attr['detections_per_class'].CopyFrom(
attr_value_pb2.AttrValue(i=detections_per_class))
new_output.attr['use_regular_nms'].CopyFrom(
attr_value_pb2.AttrValue(b=use_regular_nms))
new_output.input.extend(
['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
# Transform the graph to append new postprocessing op
input_names = []
output_names = ['TFLite_Detection_PostProcess']
transforms = ['strip_unused_nodes']
transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
output_names, transforms)
return transformed_graph_def
def export_tflite_graph(pipeline_config,
trained_checkpoint_prefix,
output_dir,
add_postprocessing_op,
max_detections,
max_classes_per_detection,
detections_per_class=100,
use_regular_nms=False):
"""Exports a tflite compatible graph and anchors for ssd detection model.
Anchors are written to a tensor and tflite compatible graph
is written to output_dir/tflite_graph.pb.
Args:
pipeline_config: a pipeline.proto object containing the configuration for
SSD model to export.
trained_checkpoint_prefix: a file prefix for the checkpoint containing the
trained parameters of the SSD model.
output_dir: A directory to write the tflite graph and anchor file to.
    add_postprocessing_op: If true, appends a TFLite_Detection_PostProcess
      custom op to the frozen graph
max_detections: Maximum number of detections (boxes) to show
max_classes_per_detection: Number of classes to display per detection
detections_per_class: In regular NonMaxSuppression, number of anchors used
for NonMaxSuppression per class
use_regular_nms: Flag to set postprocessing op to use Regular NMS instead
of Fast NMS.
Raises:
    ValueError: if the pipeline config contains models other than ssd or uses an
      image resizer other than fixed_shape_resizer.
"""
tf.gfile.MakeDirs(output_dir)
if pipeline_config.model.WhichOneof('model') != 'ssd':
raise ValueError('Only ssd models are supported in tflite. '
'Found {} in config'.format(
pipeline_config.model.WhichOneof('model')))
num_classes = pipeline_config.model.ssd.num_classes
nms_score_threshold = {
pipeline_config.model.ssd.post_processing.batch_non_max_suppression.
score_threshold
}
nms_iou_threshold = {
pipeline_config.model.ssd.post_processing.batch_non_max_suppression.
iou_threshold
}
scale_values = {}
scale_values['y_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale
}
scale_values['x_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale
}
scale_values['h_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale
}
scale_values['w_scale'] = {
pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale
}
image_resizer_config = pipeline_config.model.ssd.image_resizer
image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof')
num_channels = _DEFAULT_NUM_CHANNELS
if image_resizer == 'fixed_shape_resizer':
height = image_resizer_config.fixed_shape_resizer.height
width = image_resizer_config.fixed_shape_resizer.width
if image_resizer_config.fixed_shape_resizer.convert_to_grayscale:
num_channels = 1
shape = [1, height, width, num_channels]
else:
raise ValueError(
        'Only fixed_shape_resizer '
        'is supported with tflite. Found {}'.format(
image_resizer_config.WhichOneof('image_resizer_oneof')))
image = tf.placeholder(
tf.float32, shape=shape, name='normalized_input_image_tensor')
detection_model = model_builder.build(
pipeline_config.model, is_training=False)
predicted_tensors = detection_model.predict(image, true_image_shapes=None)
# The score conversion occurs before the post-processing custom op
_, score_conversion_fn = post_processing_builder.build(
pipeline_config.model.ssd.post_processing)
class_predictions = score_conversion_fn(
predicted_tensors['class_predictions_with_background'])
with tf.name_scope('raw_outputs'):
# 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4]
# containing the encoded box predictions. Note that these are raw
# predictions and no Non-Max suppression is applied on them and
# no decode center size boxes is applied to them.
tf.identity(predicted_tensors['box_encodings'], name='box_encodings')
# 'raw_outputs/class_predictions': a float32 tensor of shape
# [1, num_anchors, num_classes] containing the class scores for each anchor
# after applying score conversion.
tf.identity(class_predictions, name='class_predictions')
# 'anchors': a float32 tensor of shape
# [4, num_anchors] containing the anchors as a constant node.
tf.identity(
get_const_center_size_encoded_anchors(predicted_tensors['anchors']),
name='anchors')
# Add global step to the graph, so we know the training step number when we
# evaluate the model.
tf.train.get_or_create_global_step()
# graph rewriter
is_quantized = pipeline_config.HasField('graph_rewriter')
if is_quantized:
graph_rewriter_config = pipeline_config.graph_rewriter
graph_rewriter_fn = graph_rewriter_builder.build(
graph_rewriter_config, is_training=False)
graph_rewriter_fn()
if pipeline_config.model.ssd.feature_extractor.HasField('fpn'):
exporter.rewrite_nn_resize_op(is_quantized)
# freeze the graph
saver_kwargs = {}
if pipeline_config.eval_config.use_moving_averages:
saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
moving_average_checkpoint = tempfile.NamedTemporaryFile()
exporter.replace_variable_values_with_moving_averages(
tf.get_default_graph(), trained_checkpoint_prefix,
moving_average_checkpoint.name)
checkpoint_to_use = moving_average_checkpoint.name
else:
checkpoint_to_use = trained_checkpoint_prefix
saver = tf.train.Saver(**saver_kwargs)
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_to_use,
output_node_names=','.join([
'raw_outputs/box_encodings', 'raw_outputs/class_predictions',
'anchors'
]),
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
clear_devices=True,
output_graph='',
initializer_nodes='')
# Add new operation to do post processing in a custom op (TF Lite only)
if add_postprocessing_op:
transformed_graph_def = append_postprocessing_op(
frozen_graph_def, max_detections, max_classes_per_detection,
nms_score_threshold, nms_iou_threshold, num_classes, scale_values,
detections_per_class, use_regular_nms)
else:
# Return frozen without adding post-processing custom op
transformed_graph_def = frozen_graph_def
binary_graph = os.path.join(output_dir, 'tflite_graph.pb')
with tf.gfile.GFile(binary_graph, 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
txt_graph = os.path.join(output_dir, 'tflite_graph.pbtxt')
with tf.gfile.GFile(txt_graph, 'w') as f:
f.write(str(transformed_graph_def))
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | matcher_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow as tf
from object_detection.core import matcher
class MatchTest(tf.test.TestCase):
def test_get_correct_matched_columnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = match.matched_column_indices()
self.assertEquals(matched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_column_indices = sess.run(matched_column_indices)
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
self.assertEquals(num_matched_columns.dtype, tf.int32)
self.assertEquals(num_unmatched_columns.dtype, tf.int32)
self.assertEquals(num_ignored_columns.dtype, tf.int32)
with self.test_session() as sess:
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out) = sess.run(
[num_matched_columns, num_unmatched_columns, num_ignored_columns])
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
def testGetCorrectUnmatchedColumnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4]
unmatched_column_indices = match.unmatched_column_indices()
self.assertEquals(unmatched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_column_indices = sess.run(unmatched_column_indices)
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_row_indices = [3, 1, 0, 5]
matched_row_indices = match.matched_row_indices()
self.assertEquals(matched_row_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_row_inds = sess.run(matched_row_indices)
self.assertAllEqual(matched_row_inds, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [6]
ignored_column_indices = match.ignored_column_indices()
self.assertEquals(ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
ignored_column_indices = sess.run(ignored_column_indices)
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = match.matched_column_indicator()
self.assertEquals(matched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
matched_column_indicator = sess.run(matched_column_indicator)
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = match.unmatched_column_indicator()
self.assertEquals(unmatched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
unmatched_column_indicator = sess.run(unmatched_column_indicator)
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = match.ignored_column_indicator()
self.assertEquals(ignored_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
ignored_column_indicator = sess.run(ignored_column_indicator)
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = (match.
unmatched_or_ignored_column_indices())
self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_ignored_column_indices = sess.run(
unmatched_ignored_column_indices)
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
    # Note: deliberately set to a small number so that not all possibilities
    # (matched, unmatched, ignored) necessarily appear
num_matches = 10
match_results = tf.random_uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
with self.test_session() as sess:
matched, unmatched, ignored = sess.run([
matched_column_indices, unmatched_column_indices,
ignored_column_indices
])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
def test_scalar_gather_based_on_match(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32)
expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200]
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=100.,
ignored_value=200.)
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session():
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match(self):
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session():
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self):
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
match = matcher.Match(match_results, use_matmul_gather=True)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session() as sess:
self.assertTrue(
          all([op.name != 'Gather' for op in sess.graph.get_operations()]))
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | oid_object_detection_challenge_500_label_map | item {
name: "/m/061hd_"
id: 1
display_name: "Infant bed"
}
item {
name: "/m/06m11"
id: 2
display_name: "Rose"
}
item {
name: "/m/03120"
id: 3
display_name: "Flag"
}
item {
name: "/m/01kb5b"
id: 4
display_name: "Flashlight"
}
item {
name: "/m/0120dh"
id: 5
display_name: "Sea turtle"
}
item {
name: "/m/0dv5r"
id: 6
display_name: "Camera"
}
item {
name: "/m/0jbk"
id: 7
display_name: "Animal"
}
item {
name: "/m/0174n1"
id: 8
display_name: "Glove"
}
item {
name: "/m/09f_2"
id: 9
display_name: "Crocodile"
}
item {
name: "/m/01xq0k1"
id: 10
display_name: "Cattle"
}
item {
name: "/m/03jm5"
id: 11
display_name: "House"
}
item {
name: "/m/02g30s"
id: 12
display_name: "Guacamole"
}
item {
name: "/m/05z6w"
id: 13
display_name: "Penguin"
}
item {
name: "/m/01jfm_"
id: 14
display_name: "Vehicle registration plate"
}
item {
name: "/m/076lb9"
id: 15
display_name: "Training bench"
}
item {
name: "/m/0gj37"
id: 16
display_name: "Ladybug"
}
item {
name: "/m/0k0pj"
id: 17
display_name: "Human nose"
}
item {
name: "/m/0kpqd"
id: 18
display_name: "Watermelon"
}
item {
name: "/m/0l14j_"
id: 19
display_name: "Flute"
}
item {
name: "/m/0cyf8"
id: 20
display_name: "Butterfly"
}
item {
name: "/m/0174k2"
id: 21
display_name: "Washing machine"
}
item {
name: "/m/0dq75"
id: 22
display_name: "Raccoon"
}
item {
name: "/m/076bq"
id: 23
display_name: "Segway"
}
item {
name: "/m/07crc"
id: 24
display_name: "Taco"
}
item {
name: "/m/0d8zb"
id: 25
display_name: "Jellyfish"
}
item {
name: "/m/0fszt"
id: 26
display_name: "Cake"
}
item {
name: "/m/0k1tl"
id: 27
display_name: "Pen"
}
item {
name: "/m/020kz"
id: 28
display_name: "Cannon"
}
item {
name: "/m/09728"
id: 29
display_name: "Bread"
}
item {
name: "/m/07j7r"
id: 30
display_name: "Tree"
}
item {
name: "/m/0fbdv"
id: 31
display_name: "Shellfish"
}
item {
name: "/m/03ssj5"
id: 32
display_name: "Bed"
}
item {
name: "/m/03qrc"
id: 33
display_name: "Hamster"
}
item {
name: "/m/02dl1y"
id: 34
display_name: "Hat"
}
item {
name: "/m/01k6s3"
id: 35
display_name: "Toaster"
}
item {
name: "/m/02jfl0"
id: 36
display_name: "Sombrero"
}
item {
name: "/m/01krhy"
id: 37
display_name: "Tiara"
}
item {
name: "/m/04kkgm"
id: 38
display_name: "Bowl"
}
item {
name: "/m/0ft9s"
id: 39
display_name: "Dragonfly"
}
item {
name: "/m/0d_2m"
id: 40
display_name: "Moths and butterflies"
}
item {
name: "/m/0czz2"
id: 41
display_name: "Antelope"
}
item {
name: "/m/0f4s2w"
id: 42
display_name: "Vegetable"
}
item {
name: "/m/07dd4"
id: 43
display_name: "Torch"
}
item {
name: "/m/0cgh4"
id: 44
display_name: "Building"
}
item {
name: "/m/03bbps"
id: 45
display_name: "Power plugs and sockets"
}
item {
name: "/m/02pjr4"
id: 46
display_name: "Blender"
}
item {
name: "/m/04p0qw"
id: 47
display_name: "Billiard table"
}
item {
name: "/m/02pdsw"
id: 48
display_name: "Cutting board"
}
item {
name: "/m/01yx86"
id: 49
display_name: "Bronze sculpture"
}
item {
name: "/m/09dzg"
id: 50
display_name: "Turtle"
}
item {
name: "/m/0hkxq"
id: 51
display_name: "Broccoli"
}
item {
name: "/m/07dm6"
id: 52
display_name: "Tiger"
}
item {
name: "/m/054_l"
id: 53
display_name: "Mirror"
}
item {
name: "/m/01dws"
id: 54
display_name: "Bear"
}
item {
name: "/m/027pcv"
id: 55
display_name: "Zucchini"
}
item {
name: "/m/01d40f"
id: 56
display_name: "Dress"
}
item {
name: "/m/02rgn06"
id: 57
display_name: "Volleyball"
}
item {
name: "/m/0342h"
id: 58
display_name: "Guitar"
}
item {
name: "/m/06bt6"
id: 59
display_name: "Reptile"
}
item {
name: "/m/0323sq"
id: 60
display_name: "Golf cart"
}
item {
name: "/m/02zvsm"
id: 61
display_name: "Tart"
}
item {
name: "/m/02fq_6"
id: 62
display_name: "Fedora"
}
item {
name: "/m/01lrl"
id: 63
display_name: "Carnivore"
}
item {
name: "/m/0k4j"
id: 64
display_name: "Car"
}
item {
name: "/m/04h7h"
id: 65
display_name: "Lighthouse"
}
item {
name: "/m/07xyvk"
id: 66
display_name: "Coffeemaker"
}
item {
name: "/m/03y6mg"
id: 67
display_name: "Food processor"
}
item {
name: "/m/07r04"
id: 68
display_name: "Truck"
}
item {
name: "/m/03__z0"
id: 69
display_name: "Bookcase"
}
item {
name: "/m/019w40"
id: 70
display_name: "Surfboard"
}
item {
name: "/m/09j5n"
id: 71
display_name: "Footwear"
}
item {
name: "/m/0cvnqh"
id: 72
display_name: "Bench"
}
item {
name: "/m/01llwg"
id: 73
display_name: "Necklace"
}
item {
name: "/m/0c9ph5"
id: 74
display_name: "Flower"
}
item {
name: "/m/015x5n"
id: 75
display_name: "Radish"
}
item {
name: "/m/0gd2v"
id: 76
display_name: "Marine mammal"
}
item {
name: "/m/04v6l4"
id: 77
display_name: "Frying pan"
}
item {
name: "/m/02jz0l"
id: 78
display_name: "Tap"
}
item {
name: "/m/0dj6p"
id: 79
display_name: "Peach"
}
item {
name: "/m/04ctx"
id: 80
display_name: "Knife"
}
item {
name: "/m/080hkjn"
id: 81
display_name: "Handbag"
}
item {
name: "/m/01c648"
id: 82
display_name: "Laptop"
}
item {
name: "/m/01j61q"
id: 83
display_name: "Tent"
}
item {
name: "/m/012n7d"
id: 84
display_name: "Ambulance"
}
item {
name: "/m/025nd"
id: 85
display_name: "Christmas tree"
}
item {
name: "/m/09csl"
id: 86
display_name: "Eagle"
}
item {
name: "/m/01lcw4"
id: 87
display_name: "Limousine"
}
item {
name: "/m/0h8n5zk"
id: 88
display_name: "Kitchen & dining room table"
}
item {
name: "/m/0633h"
id: 89
display_name: "Polar bear"
}
item {
name: "/m/01fdzj"
id: 90
display_name: "Tower"
}
item {
name: "/m/01226z"
id: 91
display_name: "Football"
}
item {
name: "/m/0mw_6"
id: 92
display_name: "Willow"
}
item {
name: "/m/04hgtk"
id: 93
display_name: "Human head"
}
item {
name: "/m/02pv19"
id: 94
display_name: "Stop sign"
}
item {
name: "/m/09qck"
id: 95
display_name: "Banana"
}
item {
name: "/m/063rgb"
id: 96
display_name: "Mixer"
}
item {
name: "/m/0lt4_"
id: 97
display_name: "Binoculars"
}
item {
name: "/m/0270h"
id: 98
display_name: "Dessert"
}
item {
name: "/m/01h3n"
id: 99
display_name: "Bee"
}
item {
name: "/m/01mzpv"
id: 100
display_name: "Chair"
}
item {
name: "/m/04169hn"
id: 101
display_name: "Wood-burning stove"
}
item {
name: "/m/0fm3zh"
id: 102
display_name: "Flowerpot"
}
item {
name: "/m/0d20w4"
id: 103
display_name: "Beaker"
}
item {
name: "/m/0_cp5"
id: 104
display_name: "Oyster"
}
item {
name: "/m/01dy8n"
id: 105
display_name: "Woodpecker"
}
item {
name: "/m/03m5k"
id: 106
display_name: "Harp"
}
item {
name: "/m/03dnzn"
id: 107
display_name: "Bathtub"
}
item {
name: "/m/0h8mzrc"
id: 108
display_name: "Wall clock"
}
item {
name: "/m/0h8mhzd"
id: 109
display_name: "Sports uniform"
}
item {
name: "/m/03d443"
id: 110
display_name: "Rhinoceros"
}
item {
name: "/m/01gllr"
id: 111
display_name: "Beehive"
}
item {
name: "/m/0642b4"
id: 112
display_name: "Cupboard"
}
item {
name: "/m/09b5t"
id: 113
display_name: "Chicken"
}
item {
name: "/m/04yx4"
id: 114
display_name: "Man"
}
item {
name: "/m/01f8m5"
id: 115
display_name: "Blue jay"
}
item {
name: "/m/015x4r"
id: 116
display_name: "Cucumber"
}
item {
name: "/m/01j51"
id: 117
display_name: "Balloon"
}
item {
name: "/m/02zt3"
id: 118
display_name: "Kite"
}
item {
name: "/m/03tw93"
id: 119
display_name: "Fireplace"
}
item {
name: "/m/01jfsr"
id: 120
display_name: "Lantern"
}
item {
name: "/m/04ylt"
id: 121
display_name: "Missile"
}
item {
name: "/m/0bt_c3"
id: 122
display_name: "Book"
}
item {
name: "/m/0cmx8"
id: 123
display_name: "Spoon"
}
item {
name: "/m/0hqkz"
id: 124
display_name: "Grapefruit"
}
item {
name: "/m/071qp"
id: 125
display_name: "Squirrel"
}
item {
name: "/m/0cyhj_"
id: 126
display_name: "Orange"
}
item {
name: "/m/01xygc"
id: 127
display_name: "Coat"
}
item {
name: "/m/0420v5"
id: 128
display_name: "Punching bag"
}
item {
name: "/m/0898b"
id: 129
display_name: "Zebra"
}
item {
name: "/m/01knjb"
id: 130
display_name: "Billboard"
}
item {
name: "/m/0199g"
id: 131
display_name: "Bicycle"
}
item {
name: "/m/03c7gz"
id: 132
display_name: "Door handle"
}
item {
name: "/m/02x984l"
id: 133
display_name: "Mechanical fan"
}
item {
name: "/m/04zwwv"
id: 134
display_name: "Ring binder"
}
item {
name: "/m/04bcr3"
id: 135
display_name: "Table"
}
item {
name: "/m/0gv1x"
id: 136
display_name: "Parrot"
}
item {
name: "/m/01nq26"
id: 137
display_name: "Sock"
}
item {
name: "/m/02s195"
id: 138
display_name: "Vase"
}
item {
name: "/m/083kb"
id: 139
display_name: "Weapon"
}
item {
name: "/m/06nrc"
id: 140
display_name: "Shotgun"
}
item {
name: "/m/0jyfg"
id: 141
display_name: "Glasses"
}
item {
name: "/m/0nybt"
id: 142
display_name: "Seahorse"
}
item {
name: "/m/0176mf"
id: 143
display_name: "Belt"
}
item {
name: "/m/01rzcn"
id: 144
display_name: "Watercraft"
}
item {
name: "/m/0d4v4"
id: 145
display_name: "Window"
}
item {
name: "/m/03bk1"
id: 146
display_name: "Giraffe"
}
item {
name: "/m/096mb"
id: 147
display_name: "Lion"
}
item {
name: "/m/0h9mv"
id: 148
display_name: "Tire"
}
item {
name: "/m/07yv9"
id: 149
display_name: "Vehicle"
}
item {
name: "/m/0ph39"
id: 150
display_name: "Canoe"
}
item {
name: "/m/01rkbr"
id: 151
display_name: "Tie"
}
item {
name: "/m/0gjbg72"
id: 152
display_name: "Shelf"
}
item {
name: "/m/06z37_"
id: 153
display_name: "Picture frame"
}
item {
name: "/m/01m4t"
id: 154
display_name: "Printer"
}
item {
name: "/m/035r7c"
id: 155
display_name: "Human leg"
}
item {
name: "/m/019jd"
id: 156
display_name: "Boat"
}
item {
name: "/m/02tsc9"
id: 157
display_name: "Slow cooker"
}
item {
name: "/m/015wgc"
id: 158
display_name: "Croissant"
}
item {
name: "/m/0c06p"
id: 159
display_name: "Candle"
}
item {
name: "/m/01dwwc"
id: 160
display_name: "Pancake"
}
item {
name: "/m/034c16"
id: 161
display_name: "Pillow"
}
item {
name: "/m/0242l"
id: 162
display_name: "Coin"
}
item {
name: "/m/02lbcq"
id: 163
display_name: "Stretcher"
}
item {
name: "/m/03nfch"
id: 164
display_name: "Sandal"
}
item {
name: "/m/03bt1vf"
id: 165
display_name: "Woman"
}
item {
name: "/m/01lynh"
id: 166
display_name: "Stairs"
}
item {
name: "/m/03q5t"
id: 167
display_name: "Harpsichord"
}
item {
name: "/m/0fqt361"
id: 168
display_name: "Stool"
}
item {
name: "/m/01bjv"
id: 169
display_name: "Bus"
}
item {
name: "/m/01s55n"
id: 170
display_name: "Suitcase"
}
item {
name: "/m/0283dt1"
id: 171
display_name: "Human mouth"
}
item {
name: "/m/01z1kdw"
id: 172
display_name: "Juice"
}
item {
name: "/m/016m2d"
id: 173
display_name: "Skull"
}
item {
name: "/m/02dgv"
id: 174
display_name: "Door"
}
item {
name: "/m/07y_7"
id: 175
display_name: "Violin"
}
item {
name: "/m/01_5g"
id: 176
display_name: "Chopsticks"
}
item {
name: "/m/06_72j"
id: 177
display_name: "Digital clock"
}
item {
name: "/m/0ftb8"
id: 178
display_name: "Sunflower"
}
item {
name: "/m/0c29q"
id: 179
display_name: "Leopard"
}
item {
name: "/m/0jg57"
id: 180
display_name: "Bell pepper"
}
item {
name: "/m/02l8p9"
id: 181
display_name: "Harbor seal"
}
item {
name: "/m/078jl"
id: 182
display_name: "Snake"
}
item {
name: "/m/0llzx"
id: 183
display_name: "Sewing machine"
}
item {
name: "/m/0dbvp"
id: 184
display_name: "Goose"
}
item {
name: "/m/09ct_"
id: 185
display_name: "Helicopter"
}
item {
name: "/m/0dkzw"
id: 186
display_name: "Seat belt"
}
item {
name: "/m/02p5f1q"
id: 187
display_name: "Coffee cup"
}
item {
name: "/m/0fx9l"
id: 188
display_name: "Microwave oven"
}
item {
name: "/m/01b9xk"
id: 189
display_name: "Hot dog"
}
item {
name: "/m/0b3fp9"
id: 190
display_name: "Countertop"
}
item {
name: "/m/0h8n27j"
id: 191
display_name: "Serving tray"
}
item {
name: "/m/0h8n6f9"
id: 192
display_name: "Dog bed"
}
item {
name: "/m/01599"
id: 193
display_name: "Beer"
}
item {
name: "/m/017ftj"
id: 194
display_name: "Sunglasses"
}
item {
name: "/m/044r5d"
id: 195
display_name: "Golf ball"
}
item {
name: "/m/01dwsz"
id: 196
display_name: "Waffle"
}
item {
name: "/m/0cdl1"
id: 197
display_name: "Palm tree"
}
item {
name: "/m/07gql"
id: 198
display_name: "Trumpet"
}
item {
name: "/m/0hdln"
id: 199
display_name: "Ruler"
}
item {
name: "/m/0zvk5"
id: 200
display_name: "Helmet"
}
item {
name: "/m/012w5l"
id: 201
display_name: "Ladder"
}
item {
name: "/m/021sj1"
id: 202
display_name: "Office building"
}
item {
name: "/m/0bh9flk"
id: 203
display_name: "Tablet computer"
}
item {
name: "/m/09gtd"
id: 204
display_name: "Toilet paper"
}
item {
name: "/m/0jwn_"
id: 205
display_name: "Pomegranate"
}
item {
name: "/m/02wv6h6"
id: 206
display_name: "Skirt"
}
item {
name: "/m/02wv84t"
id: 207
display_name: "Gas stove"
}
item {
name: "/m/021mn"
id: 208
display_name: "Cookie"
}
item {
name: "/m/018p4k"
id: 209
display_name: "Cart"
}
item {
name: "/m/06j2d"
id: 210
display_name: "Raven"
}
item {
name: "/m/033cnk"
id: 211
display_name: "Egg"
}
item {
name: "/m/01j3zr"
id: 212
display_name: "Burrito"
}
item {
name: "/m/03fwl"
id: 213
display_name: "Goat"
}
item {
name: "/m/058qzx"
id: 214
display_name: "Kitchen knife"
}
item {
name: "/m/06_fw"
id: 215
display_name: "Skateboard"
}
item {
name: "/m/02x8cch"
id: 216
display_name: "Salt and pepper shakers"
}
item {
name: "/m/04g2r"
id: 217
display_name: "Lynx"
}
item {
name: "/m/01b638"
id: 218
display_name: "Boot"
}
item {
name: "/m/099ssp"
id: 219
display_name: "Platter"
}
item {
name: "/m/071p9"
id: 220
display_name: "Ski"
}
item {
name: "/m/01gkx_"
id: 221
display_name: "Swimwear"
}
item {
name: "/m/0b_rs"
id: 222
display_name: "Swimming pool"
}
item {
name: "/m/03v5tg"
id: 223
display_name: "Drinking straw"
}
item {
name: "/m/01j5ks"
id: 224
display_name: "Wrench"
}
item {
name: "/m/026t6"
id: 225
display_name: "Drum"
}
item {
name: "/m/0_k2"
id: 226
display_name: "Ant"
}
item {
name: "/m/039xj_"
id: 227
display_name: "Human ear"
}
item {
name: "/m/01b7fy"
id: 228
display_name: "Headphones"
}
item {
name: "/m/0220r2"
id: 229
display_name: "Fountain"
}
item {
name: "/m/015p6"
id: 230
display_name: "Bird"
}
item {
name: "/m/0fly7"
id: 231
display_name: "Jeans"
}
item {
name: "/m/07c52"
id: 232
display_name: "Television"
}
item {
name: "/m/0n28_"
id: 233
display_name: "Crab"
}
item {
name: "/m/0hg7b"
id: 234
display_name: "Microphone"
}
item {
name: "/m/019dx1"
id: 235
display_name: "Home appliance"
}
item {
name: "/m/04vv5k"
id: 236
display_name: "Snowplow"
}
item {
name: "/m/020jm"
id: 237
display_name: "Beetle"
}
item {
name: "/m/047v4b"
id: 238
display_name: "Artichoke"
}
item {
name: "/m/01xs3r"
id: 239
display_name: "Jet ski"
}
item {
name: "/m/03kt2w"
id: 240
display_name: "Stationary bicycle"
}
item {
name: "/m/03q69"
id: 241
display_name: "Human hair"
}
item {
name: "/m/01dxs"
id: 242
display_name: "Brown bear"
}
item {
name: "/m/01h8tj"
id: 243
display_name: "Starfish"
}
item {
name: "/m/0dt3t"
id: 244
display_name: "Fork"
}
item {
name: "/m/0cjq5"
id: 245
display_name: "Lobster"
}
item {
name: "/m/0h8lkj8"
id: 246
display_name: "Corded phone"
}
item {
name: "/m/0271t"
id: 247
display_name: "Drink"
}
item {
name: "/m/03q5c7"
id: 248
display_name: "Saucer"
}
item {
name: "/m/0fj52s"
id: 249
display_name: "Carrot"
}
item {
name: "/m/03vt0"
id: 250
display_name: "Insect"
}
item {
name: "/m/01x3z"
id: 251
display_name: "Clock"
}
item {
name: "/m/0d5gx"
id: 252
display_name: "Castle"
}
item {
name: "/m/0h8my_4"
id: 253
display_name: "Tennis racket"
}
item {
name: "/m/03ldnb"
id: 254
display_name: "Ceiling fan"
}
item {
name: "/m/0cjs7"
id: 255
display_name: "Asparagus"
}
item {
name: "/m/0449p"
id: 256
display_name: "Jaguar"
}
item {
name: "/m/04szw"
id: 257
display_name: "Musical instrument"
}
item {
name: "/m/07jdr"
id: 258
display_name: "Train"
}
item {
name: "/m/01yrx"
id: 259
display_name: "Cat"
}
item {
name: "/m/06c54"
id: 260
display_name: "Rifle"
}
item {
name: "/m/04h8sr"
id: 261
display_name: "Dumbbell"
}
item {
name: "/m/050k8"
id: 262
display_name: "Mobile phone"
}
item {
name: "/m/0pg52"
id: 263
display_name: "Taxi"
}
item {
name: "/m/02f9f_"
id: 264
display_name: "Shower"
}
item {
name: "/m/054fyh"
id: 265
display_name: "Pitcher"
}
item {
name: "/m/09k_b"
id: 266
display_name: "Lemon"
}
item {
name: "/m/03xxp"
id: 267
display_name: "Invertebrate"
}
item {
name: "/m/0jly1"
id: 268
display_name: "Turkey"
}
item {
name: "/m/06k2mb"
id: 269
display_name: "High heels"
}
item {
name: "/m/04yqq2"
id: 270
display_name: "Bust"
}
item {
name: "/m/0bwd_0j"
id: 271
display_name: "Elephant"
}
item {
name: "/m/02h19r"
id: 272
display_name: "Scarf"
}
item {
name: "/m/02zn6n"
id: 273
display_name: "Barrel"
}
item {
name: "/m/07c6l"
id: 274
display_name: "Trombone"
}
item {
name: "/m/05zsy"
id: 275
display_name: "Pumpkin"
}
item {
name: "/m/025dyy"
id: 276
display_name: "Box"
}
item {
name: "/m/07j87"
id: 277
display_name: "Tomato"
}
item {
name: "/m/09ld4"
id: 278
display_name: "Frog"
}
item {
name: "/m/01vbnl"
id: 279
display_name: "Bidet"
}
item {
name: "/m/0dzct"
id: 280
display_name: "Human face"
}
item {
name: "/m/03fp41"
id: 281
display_name: "Houseplant"
}
item {
name: "/m/0h2r6"
id: 282
display_name: "Van"
}
item {
name: "/m/0by6g"
id: 283
display_name: "Shark"
}
item {
name: "/m/0cxn2"
id: 284
display_name: "Ice cream"
}
item {
name: "/m/04tn4x"
id: 285
display_name: "Swim cap"
}
item {
name: "/m/0f6wt"
id: 286
display_name: "Falcon"
}
item {
name: "/m/05n4y"
id: 287
display_name: "Ostrich"
}
item {
name: "/m/0gxl3"
id: 288
display_name: "Handgun"
}
item {
name: "/m/02d9qx"
id: 289
display_name: "Whiteboard"
}
item {
name: "/m/04m9y"
id: 290
display_name: "Lizard"
}
item {
name: "/m/05z55"
id: 291
display_name: "Pasta"
}
item {
name: "/m/01x3jk"
id: 292
display_name: "Snowmobile"
}
item {
name: "/m/0h8l4fh"
id: 293
display_name: "Light bulb"
}
item {
name: "/m/031b6r"
id: 294
display_name: "Window blind"
}
item {
name: "/m/01tcjp"
id: 295
display_name: "Muffin"
}
item {
name: "/m/01f91_"
id: 296
display_name: "Pretzel"
}
item {
name: "/m/02522"
id: 297
display_name: "Computer monitor"
}
item {
name: "/m/0319l"
id: 298
display_name: "Horn"
}
item {
name: "/m/0c_jw"
id: 299
display_name: "Furniture"
}
item {
name: "/m/0l515"
id: 300
display_name: "Sandwich"
}
item {
name: "/m/0306r"
id: 301
display_name: "Fox"
}
item {
name: "/m/0crjs"
id: 302
display_name: "Convenience store"
}
item {
name: "/m/0ch_cf"
id: 303
display_name: "Fish"
}
item {
name: "/m/02xwb"
id: 304
display_name: "Fruit"
}
item {
name: "/m/01r546"
id: 305
display_name: "Earrings"
}
item {
name: "/m/03rszm"
id: 306
display_name: "Curtain"
}
item {
name: "/m/0388q"
id: 307
display_name: "Grape"
}
item {
name: "/m/03m3pdh"
id: 308
display_name: "Sofa bed"
}
item {
name: "/m/03k3r"
id: 309
display_name: "Horse"
}
item {
name: "/m/0hf58v5"
id: 310
display_name: "Luggage and bags"
}
item {
name: "/m/01y9k5"
id: 311
display_name: "Desk"
}
item {
name: "/m/05441v"
id: 312
display_name: "Crutch"
}
item {
name: "/m/03p3bw"
id: 313
display_name: "Bicycle helmet"
}
item {
name: "/m/0175cv"
id: 314
display_name: "Tick"
}
item {
name: "/m/0cmf2"
id: 315
display_name: "Airplane"
}
item {
name: "/m/0ccs93"
id: 316
display_name: "Canary"
}
item {
name: "/m/02d1br"
id: 317
display_name: "Spatula"
}
item {
name: "/m/0gjkl"
id: 318
display_name: "Watch"
}
item {
name: "/m/0jqgx"
id: 319
display_name: "Lily"
}
item {
name: "/m/0h99cwc"
id: 320
display_name: "Kitchen appliance"
}
item {
name: "/m/047j0r"
id: 321
display_name: "Filing cabinet"
}
item {
name: "/m/0k5j"
id: 322
display_name: "Aircraft"
}
item {
name: "/m/0h8n6ft"
id: 323
display_name: "Cake stand"
}
item {
name: "/m/0gm28"
id: 324
display_name: "Candy"
}
item {
name: "/m/0130jx"
id: 325
display_name: "Sink"
}
item {
name: "/m/04rmv"
id: 326
display_name: "Mouse"
}
item {
name: "/m/081qc"
id: 327
display_name: "Wine"
}
item {
name: "/m/0qmmr"
id: 328
display_name: "Wheelchair"
}
item {
name: "/m/03fj2"
id: 329
display_name: "Goldfish"
}
item {
name: "/m/040b_t"
id: 330
display_name: "Refrigerator"
}
item {
name: "/m/02y6n"
id: 331
display_name: "French fries"
}
item {
name: "/m/0fqfqc"
id: 332
display_name: "Drawer"
}
item {
name: "/m/030610"
id: 333
display_name: "Treadmill"
}
item {
name: "/m/07kng9"
id: 334
display_name: "Picnic basket"
}
item {
name: "/m/029b3"
id: 335
display_name: "Dice"
}
item {
name: "/m/0fbw6"
id: 336
display_name: "Cabbage"
}
item {
name: "/m/07qxg_"
id: 337
display_name: "Football helmet"
}
item {
name: "/m/068zj"
id: 338
display_name: "Pig"
}
item {
name: "/m/01g317"
id: 339
display_name: "Person"
}
item {
name: "/m/01bfm9"
id: 340
display_name: "Shorts"
}
item {
name: "/m/02068x"
id: 341
display_name: "Gondola"
}
item {
name: "/m/0fz0h"
id: 342
display_name: "Honeycomb"
}
item {
name: "/m/0jy4k"
id: 343
display_name: "Doughnut"
}
item {
name: "/m/05kyg_"
id: 344
display_name: "Chest of drawers"
}
item {
name: "/m/01prls"
id: 345
display_name: "Land vehicle"
}
item {
name: "/m/01h44"
id: 346
display_name: "Bat"
}
item {
name: "/m/08pbxl"
id: 347
display_name: "Monkey"
}
item {
name: "/m/02gzp"
id: 348
display_name: "Dagger"
}
item {
name: "/m/04brg2"
id: 349
display_name: "Tableware"
}
item {
name: "/m/031n1"
id: 350
display_name: "Human foot"
}
item {
name: "/m/02jvh9"
id: 351
display_name: "Mug"
}
item {
name: "/m/046dlr"
id: 352
display_name: "Alarm clock"
}
item {
name: "/m/0h8ntjv"
id: 353
display_name: "Pressure cooker"
}
item {
name: "/m/0k65p"
id: 354
display_name: "Human hand"
}
item {
name: "/m/011k07"
id: 355
display_name: "Tortoise"
}
item {
name: "/m/03grzl"
id: 356
display_name: "Baseball glove"
}
item {
name: "/m/06y5r"
id: 357
display_name: "Sword"
}
item {
name: "/m/061_f"
id: 358
display_name: "Pear"
}
item {
name: "/m/01cmb2"
id: 359
display_name: "Miniskirt"
}
item {
name: "/m/01mqdt"
id: 360
display_name: "Traffic sign"
}
item {
name: "/m/05r655"
id: 361
display_name: "Girl"
}
item {
name: "/m/02p3w7d"
id: 362
display_name: "Roller skates"
}
item {
name: "/m/029tx"
id: 363
display_name: "Dinosaur"
}
item {
name: "/m/04m6gz"
id: 364
display_name: "Porch"
}
item {
name: "/m/015h_t"
id: 365
display_name: "Human beard"
}
item {
name: "/m/06pcq"
id: 366
display_name: "Submarine sandwich"
}
item {
name: "/m/01bms0"
id: 367
display_name: "Screwdriver"
}
item {
name: "/m/07fbm7"
id: 368
display_name: "Strawberry"
}
item {
name: "/m/09tvcd"
id: 369
display_name: "Wine glass"
}
item {
name: "/m/06nwz"
id: 370
display_name: "Seafood"
}
item {
name: "/m/0dv9c"
id: 371
display_name: "Racket"
}
item {
name: "/m/083wq"
id: 372
display_name: "Wheel"
}
item {
name: "/m/0gd36"
id: 373
display_name: "Sea lion"
}
item {
name: "/m/0138tl"
id: 374
display_name: "Toy"
}
item {
name: "/m/07clx"
id: 375
display_name: "Tea"
}
item {
name: "/m/05ctyq"
id: 376
display_name: "Tennis ball"
}
item {
name: "/m/0bjyj5"
id: 377
display_name: "Waste container"
}
item {
name: "/m/0dbzx"
id: 378
display_name: "Mule"
}
item {
name: "/m/02ctlc"
id: 379
display_name: "Cricket ball"
}
item {
name: "/m/0fp6w"
id: 380
display_name: "Pineapple"
}
item {
name: "/m/0djtd"
id: 381
display_name: "Coconut"
}
item {
name: "/m/0167gd"
id: 382
display_name: "Doll"
}
item {
name: "/m/078n6m"
id: 383
display_name: "Coffee table"
}
item {
name: "/m/0152hh"
id: 384
display_name: "Snowman"
}
item {
name: "/m/04gth"
id: 385
display_name: "Lavender"
}
item {
name: "/m/0ll1f78"
id: 386
display_name: "Shrimp"
}
item {
name: "/m/0cffdh"
id: 387
display_name: "Maple"
}
item {
name: "/m/025rp__"
id: 388
display_name: "Cowboy hat"
}
item {
name: "/m/02_n6y"
id: 389
display_name: "Goggles"
}
item {
name: "/m/0wdt60w"
id: 390
display_name: "Rugby ball"
}
item {
name: "/m/0cydv"
id: 391
display_name: "Caterpillar"
}
item {
name: "/m/01n5jq"
id: 392
display_name: "Poster"
}
item {
name: "/m/09rvcxw"
id: 393
display_name: "Rocket"
}
item {
name: "/m/013y1f"
id: 394
display_name: "Organ"
}
item {
name: "/m/06ncr"
id: 395
display_name: "Saxophone"
}
item {
name: "/m/015qff"
id: 396
display_name: "Traffic light"
}
item {
name: "/m/024g6"
id: 397
display_name: "Cocktail"
}
item {
name: "/m/05gqfk"
id: 398
display_name: "Plastic bag"
}
item {
name: "/m/0dv77"
id: 399
display_name: "Squash"
}
item {
name: "/m/052sf"
id: 400
display_name: "Mushroom"
}
item {
name: "/m/0cdn1"
id: 401
display_name: "Hamburger"
}
item {
name: "/m/03jbxj"
id: 402
display_name: "Light switch"
}
item {
name: "/m/0cyfs"
id: 403
display_name: "Parachute"
}
item {
name: "/m/0kmg4"
id: 404
display_name: "Teddy bear"
}
item {
name: "/m/02cvgx"
id: 405
display_name: "Winter melon"
}
item {
name: "/m/09kx5"
id: 406
display_name: "Deer"
}
item {
name: "/m/057cc"
id: 407
display_name: "Musical keyboard"
}
item {
name: "/m/02pkr5"
id: 408
display_name: "Plumbing fixture"
}
item {
name: "/m/057p5t"
id: 409
display_name: "Scoreboard"
}
item {
name: "/m/03g8mr"
id: 410
display_name: "Baseball bat"
}
item {
name: "/m/0frqm"
id: 411
display_name: "Envelope"
}
item {
name: "/m/03m3vtv"
id: 412
display_name: "Adhesive tape"
}
item {
name: "/m/0584n8"
id: 413
display_name: "Briefcase"
}
item {
name: "/m/014y4n"
id: 414
display_name: "Paddle"
}
item {
name: "/m/01g3x7"
id: 415
display_name: "Bow and arrow"
}
item {
name: "/m/07cx4"
id: 416
display_name: "Telephone"
}
item {
name: "/m/07bgp"
id: 417
display_name: "Sheep"
}
item {
name: "/m/032b3c"
id: 418
display_name: "Jacket"
}
item {
name: "/m/01bl7v"
id: 419
display_name: "Boy"
}
item {
name: "/m/0663v"
id: 420
display_name: "Pizza"
}
item {
name: "/m/0cn6p"
id: 421
display_name: "Otter"
}
item {
name: "/m/02rdsp"
id: 422
display_name: "Office supplies"
}
item {
name: "/m/02crq1"
id: 423
display_name: "Couch"
}
item {
name: "/m/01xqw"
id: 424
display_name: "Cello"
}
item {
name: "/m/0cnyhnx"
id: 425
display_name: "Bull"
}
item {
name: "/m/01x_v"
id: 426
display_name: "Camel"
}
item {
name: "/m/018xm"
id: 427
display_name: "Ball"
}
item {
name: "/m/09ddx"
id: 428
display_name: "Duck"
}
item {
name: "/m/084zz"
id: 429
display_name: "Whale"
}
item {
name: "/m/01n4qj"
id: 430
display_name: "Shirt"
}
item {
name: "/m/07cmd"
id: 431
display_name: "Tank"
}
item {
name: "/m/04_sv"
id: 432
display_name: "Motorcycle"
}
item {
name: "/m/0mkg"
id: 433
display_name: "Accordion"
}
item {
name: "/m/09d5_"
id: 434
display_name: "Owl"
}
item {
name: "/m/0c568"
id: 435
display_name: "Porcupine"
}
item {
name: "/m/02wbtzl"
id: 436
display_name: "Sun hat"
}
item {
name: "/m/05bm6"
id: 437
display_name: "Nail"
}
item {
name: "/m/01lsmm"
id: 438
display_name: "Scissors"
}
item {
name: "/m/0dftk"
id: 439
display_name: "Swan"
}
item {
name: "/m/0dtln"
id: 440
display_name: "Lamp"
}
item {
name: "/m/0nl46"
id: 441
display_name: "Crown"
}
item {
name: "/m/05r5c"
id: 442
display_name: "Piano"
}
item {
name: "/m/06msq"
id: 443
display_name: "Sculpture"
}
item {
name: "/m/0cd4d"
id: 444
display_name: "Cheetah"
}
item {
name: "/m/05kms"
id: 445
display_name: "Oboe"
}
item {
name: "/m/02jnhm"
id: 446
display_name: "Tin can"
}
item {
name: "/m/0fldg"
id: 447
display_name: "Mango"
}
item {
name: "/m/073bxn"
id: 448
display_name: "Tripod"
}
item {
name: "/m/029bxz"
id: 449
display_name: "Oven"
}
item {
name: "/m/020lf"
id: 450
display_name: "Computer mouse"
}
item {
name: "/m/01btn"
id: 451
display_name: "Barge"
}
item {
name: "/m/02vqfm"
id: 452
display_name: "Coffee"
}
item {
name: "/m/06__v"
id: 453
display_name: "Snowboard"
}
item {
name: "/m/043nyj"
id: 454
display_name: "Common fig"
}
item {
name: "/m/0grw1"
id: 455
display_name: "Salad"
}
item {
name: "/m/03hl4l9"
id: 456
display_name: "Marine invertebrates"
}
item {
name: "/m/0hnnb"
id: 457
display_name: "Umbrella"
}
item {
name: "/m/04c0y"
id: 458
display_name: "Kangaroo"
}
item {
name: "/m/0dzf4"
id: 459
display_name: "Human arm"
}
item {
name: "/m/07v9_z"
id: 460
display_name: "Measuring cup"
}
item {
name: "/m/0f9_l"
id: 461
display_name: "Snail"
}
item {
name: "/m/0703r8"
id: 462
display_name: "Loveseat"
}
item {
name: "/m/01xyhv"
id: 463
display_name: "Suit"
}
item {
name: "/m/01fh4r"
id: 464
display_name: "Teapot"
}
item {
name: "/m/04dr76w"
id: 465
display_name: "Bottle"
}
item {
name: "/m/0pcr"
id: 466
display_name: "Alpaca"
}
item {
name: "/m/03s_tn"
id: 467
display_name: "Kettle"
}
item {
name: "/m/07mhn"
id: 468
display_name: "Trousers"
}
item {
name: "/m/01hrv5"
id: 469
display_name: "Popcorn"
}
item {
name: "/m/019h78"
id: 470
display_name: "Centipede"
}
item {
name: "/m/09kmb"
id: 471
display_name: "Spider"
}
item {
name: "/m/0h23m"
id: 472
display_name: "Sparrow"
}
item {
name: "/m/050gv4"
id: 473
display_name: "Plate"
}
item {
name: "/m/01fb_0"
id: 474
display_name: "Bagel"
}
item {
name: "/m/02w3_ws"
id: 475
display_name: "Personal care"
}
item {
name: "/m/014j1m"
id: 476
display_name: "Apple"
}
item {
name: "/m/01gmv2"
id: 477
display_name: "Brassiere"
}
item {
name: "/m/04y4h8h"
id: 478
display_name: "Bathroom cabinet"
}
item {
name: "/m/026qbn5"
id: 479
display_name: "Studio couch"
}
item {
name: "/m/01m2v"
id: 480
display_name: "Computer keyboard"
}
item {
name: "/m/05_5p_0"
id: 481
display_name: "Table tennis racket"
}
item {
name: "/m/07030"
id: 482
display_name: "Sushi"
}
item {
name: "/m/01s105"
id: 483
display_name: "Cabinetry"
}
item {
name: "/m/033rq4"
id: 484
display_name: "Street light"
}
item {
name: "/m/0162_1"
id: 485
display_name: "Towel"
}
item {
name: "/m/02z51p"
id: 486
display_name: "Nightstand"
}
item {
name: "/m/06mf6"
id: 487
display_name: "Rabbit"
}
item {
name: "/m/02hj4"
id: 488
display_name: "Dolphin"
}
item {
name: "/m/0bt9lr"
id: 489
display_name: "Dog"
}
item {
name: "/m/08hvt4"
id: 490
display_name: "Jug"
}
item {
name: "/m/084rd"
id: 491
display_name: "Wok"
}
item {
name: "/m/01pns0"
id: 492
display_name: "Fire hydrant"
}
item {
name: "/m/014sv8"
id: 493
display_name: "Human eye"
}
item {
name: "/m/079cl"
id: 494
display_name: "Skyscraper"
}
item {
name: "/m/01940j"
id: 495
display_name: "Backpack"
}
item {
name: "/m/05vtc"
id: 496
display_name: "Potato"
}
item {
name: "/m/02w3r3"
id: 497
display_name: "Paper towel"
}
item {
name: "/m/054xkw"
id: 498
display_name: "Lifejacket"
}
item {
name: "/m/01bqk0"
id: 499
display_name: "Bicycle wheel"
}
item {
name: "/m/09g1w"
id: 500
display_name: "Toilet"
}
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs | configs | e2e_mask_rcnn_R_50_FPN_1x_bs32 | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"
BACKBONE:
CONV_BODY: "R-50-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TRAIN: 4000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
ROI_MASK_HEAD:
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
MASK_ON: True
DATASETS:
TRAIN: ("coco_2017_train",)
TEST: ("coco_2017_val",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
BASE_LR: 0.04
WEIGHT_DECAY: 0.0001
STEPS: (33000, 44000)
MAX_ITER: 50000
IMS_PER_BATCH: 32
TEST:
IMS_PER_BATCH: 8
|
PyTorch/Translation/GNMT/seq2seq/data | data | sampler | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import torch
from torch.utils.data.sampler import Sampler
from seq2seq.utils import get_rank
from seq2seq.utils import get_world_size
class DistributedSampler(Sampler):
def __init__(self, dataset, batch_size, seeds, world_size=None, rank=None):
"""
Constructor for the DistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.seeds = seeds
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def init_rng(self):
"""
Creates new RNG, seed depends on current epoch idx.
"""
rng = torch.Generator()
seed = self.seeds[self.epoch]
logging.info(f'Sampler for epoch {self.epoch} uses seed {seed}')
rng.manual_seed(seed)
return rng
def distribute_batches(self, indices):
"""
Assigns batches to workers.
Consecutive ranks are getting consecutive batches.
:param indices: torch.tensor with batch indices
"""
assert len(indices) == self.num_samples
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return indices
def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices
def __iter__(self):
rng = self.init_rng()
# generate permutation
indices = torch.randperm(self.data_len, generator=rng)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# assign batches to workers
indices = self.distribute_batches(indices)
return iter(indices)
def set_epoch(self, epoch):
"""
Sets current epoch index.
Epoch index is used to seed RNG in __iter__() function.
:param epoch: index of current epoch
"""
self.epoch = epoch
def __len__(self):
return self.num_samples // self.world_size
class ShardingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, seeds, shard_size,
world_size=None, rank=None):
"""
Constructor for the ShardingSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param shard_size: number of global batches within one shard
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, seeds, world_size, rank)
self.shard_size = shard_size
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def __iter__(self):
rng = self.init_rng()
# generate permutation
indices = torch.randperm(self.data_len, generator=rng)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# splits the dataset into chunks of 'self.shard_size' global batches
# each, sorts by (src + tgt) sequence length within each chunk,
# reshuffles all global batches
shard_size = self.global_batch_size * self.shard_size
nshards = (self.num_samples + shard_size - 1) // shard_size
lengths = self.dataset.lengths[indices]
shards = [indices[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
len_shards = [lengths[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
# sort by (src + tgt) sequence length within each shard
indices = []
for len_shard in len_shards:
_, ind = len_shard.sort()
indices.append(ind)
output = tuple(shard[idx] for shard, idx in zip(shards, indices))
# build batches
indices = torch.cat(output)
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, rng)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
class BucketingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, seeds, num_buckets,
world_size=None, rank=None):
"""
Constructor for the BucketingSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param num_buckets: number of buckets
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, seeds, world_size, rank)
self.num_buckets = num_buckets
bucket_width = (dataset.max_len + num_buckets - 1) // num_buckets
# assign sentences to buckets based on src and tgt sequence lengths
bucket_ids = torch.max(dataset.src_lengths // bucket_width,
dataset.tgt_lengths // bucket_width)
bucket_ids.clamp_(0, num_buckets - 1)
# build buckets
all_indices = torch.arange(self.data_len)
self.buckets = []
self.num_samples = 0
global_bs = self.global_batch_size
for bid in range(num_buckets):
# gather indices for current bucket
indices = all_indices[bucket_ids == bid]
self.buckets.append(indices)
# count number of samples in current bucket
samples = len(indices) // global_bs * global_bs
self.num_samples += samples
def __iter__(self):
rng = self.init_rng()
global_bs = self.global_batch_size
indices = []
for bid in range(self.num_buckets):
# random shuffle within current bucket
perm = torch.randperm(len(self.buckets[bid]), generator=rng)
bucket_indices = self.buckets[bid][perm]
# make bucket_indices evenly divisible by global batch size
length = len(bucket_indices) // global_bs * global_bs
bucket_indices = bucket_indices[:length]
assert len(bucket_indices) % self.global_batch_size == 0
# add samples from current bucket to indices for current epoch
indices.append(bucket_indices)
indices = torch.cat(indices)
assert len(indices) % self.global_batch_size == 0
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, rng)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
class StaticDistributedSampler(Sampler):
def __init__(self, dataset, batch_size, pad, repeat=1, world_size=None, rank=None):
"""
Constructor for the StaticDistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param pad: if True: pads dataset to a multiple of global_batch_size
samples
        :param repeat: number of times the dataset is repeated within one pass
            (indices wrap around modulo the dataset length), may be fractional
        :param world_size: number of distributed workers
:param rank: rank of the current process
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.world_size = world_size
global_batch_size = batch_size * world_size
data_len = len(dataset)
repeated_data_len = int(len(dataset) * repeat)
num_samples = (repeated_data_len + global_batch_size - 1) \
// global_batch_size * global_batch_size
self.num_samples = num_samples
indices = list(range(repeated_data_len))
if pad:
# pad dataset to a multiple of global_batch_size samples, uses
# sample with idx 0 as pad
indices += [0] * (num_samples - len(indices))
else:
# temporary pad to a multiple of global batch size, pads with "-1"
# which is later removed from the list of indices
indices += [-1] * (num_samples - len(indices))
indices = torch.tensor(indices)
indices = indices.view(-1, batch_size)
indices = indices[rank::world_size].contiguous()
indices = indices.view(-1)
# remove temporary pad
indices = indices[indices != -1]
indices = indices % data_len
indices = indices.tolist()
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
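if __name__ == '__main__':
    # Illustrative smoke test (not part of the original file). The toy dataset
    # below is a hypothetical stand-in: BucketingSampler only needs __len__()
    # plus the `max_len`, `src_lengths` and `tgt_lengths` attributes, so any
    # parallel-corpus dataset exposing those attributes is driven the same way.
    from torch.utils.data import DataLoader
    class ToyParallelDataset(torch.utils.data.Dataset):
        def __init__(self, num_samples=1024, max_len=50):
            self.max_len = max_len
            self.src_lengths = torch.randint(1, max_len, (num_samples,))
            self.tgt_lengths = torch.randint(1, max_len, (num_samples,))
        def __len__(self):
            return len(self.src_lengths)
        def __getitem__(self, idx):
            # a real dataset would return (src, tgt) token tensors here
            return idx
    dataset = ToyParallelDataset()
    # one seed per epoch; world_size/rank are passed explicitly so that
    # torch.distributed does not need to be initialized for this sketch
    sampler = BucketingSampler(dataset, batch_size=32, seeds=[17, 23, 42],
                               num_buckets=4, world_size=1, rank=0)
    loader = DataLoader(dataset, batch_size=32, sampler=sampler, drop_last=True)
    for epoch in range(3):
        sampler.set_epoch(epoch)
        for batch in loader:
            pass  # a training step would consume `batch` here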
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | engineCache | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_ENGINECACHE_H
#define TT2I_ENGINECACHE_H
#include "trtPtr.h"
#include "NvInfer.h"
#include <memory>
#include <mutex>
#include <string>
#include <vector>
namespace tts
{
class EngineCache
{
public:
/**
* @brief Create a new EngineCache object.
*
* @param logger The logger to use.
*/
EngineCache(std::shared_ptr<nvinfer1::ILogger> logger);
/**
* @brief Load a single TRT engine from a file.
*
* @param name The name of the file.
*
* @return The instantiated engine.
*/
TRTPtr<nvinfer1::ICudaEngine> load(const std::string& name);
/**
* @brief Load multiple TRT engines from a single file.
*
* @param name The name of the file.
*
* @return The instantiated engines.
*/
std::vector<TRTPtr<nvinfer1::ICudaEngine>>
loadComposite(const std::string& name);
/**
* @brief Check if an engine is available for loading.
*
* @param name The filename.
*
* @return True if the file exists and is accessible.
*/
bool has(const std::string& name) const;
/**
* @brief Save the given engine to a file.
*
* @param engine The engine to save.
* @param name The filename.
*/
void save(const nvinfer1::ICudaEngine& engine, const std::string& name);
/**
* @brief Save multiple engines to a single file.
*
* @param engines The set of engines to save.
* @param name The name of the file to save the engines to.
*/
void save(
const std::vector<TRTPtr<nvinfer1::ICudaEngine>>& engines,
const std::string& name);
private:
std::shared_ptr<nvinfer1::ILogger> mLogger;
static TRTPtr<nvinfer1::IRuntime> mRuntime;
static std::mutex mMutex;
};
} // namespace tts
#endif
|
.github/ISSUE_TEMPLATE | ISSUE_TEMPLATE | bug_report | ---
name: Bug report
about: Create a report to help us improve
title: "[Model/Framework] What is the problem?"
labels: bug
assignees: ''
---
Related to **Model/Framework(s)**
*(e.g. GNMT/PyTorch or FasterTransformer/All)*
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Install '...'
2. Set "..."
3. Launch '...'
**Expected behavior**
A clear and concise description of what you expected to happen.
**Environment**
Please provide at least:
* Container version (e.g. pytorch:19.05-py3):
* GPUs in the system: (e.g. 8x Tesla V100-SXM2-16GB):
* CUDA driver version (e.g. 418.67):
|
PyTorch/Detection/Efficientdet/effdet | effdet | evaluator | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import abc
import json
from .distributed import synchronize, is_main_process, all_gather_container
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import numpy as np
import itertools
def create_small_table(small_dict):
"""
Create a small table using the keys of small_dict as headers. This is only
suitable for small dictionaries.
Args:
small_dict (dict): a result dictionary of only a few items.
Returns:
str: the table as a string.
"""
keys, values = tuple(zip(*small_dict.items()))
table = tabulate(
[values],
headers=keys,
tablefmt="pipe",
floatfmt=".3f",
stralign="center",
numalign="center",
)
return table
class Evaluator:
def __init__(self):
pass
@abc.abstractmethod
def add_predictions(self, output, target):
pass
@abc.abstractmethod
def evaluate(self):
pass
class COCOEvaluator(Evaluator):
def __init__(self, coco_api, distributed=False, waymo=False):
super().__init__()
self.coco_api = coco_api
self.distributed = distributed
self.distributed_device = None
self.img_ids = []
self.predictions = []
self.waymo = waymo
def reset(self):
self.img_ids = []
self.predictions = []
def add_predictions(self, detections, target):
if self.distributed:
if self.distributed_device is None:
# cache for use later to broadcast end metric
self.distributed_device = detections.device
synchronize()
detections = all_gather_container(detections)
#target = all_gather_container(target)
sample_ids = all_gather_container(target['img_id'])
if not is_main_process():
return
else:
sample_ids = target['img_id']
detections = detections.cpu()
sample_ids = sample_ids.cpu()
for index, sample in enumerate(detections):
image_id = int(sample_ids[index])
for det in sample:
score = float(det[4])
if score < .001: # stop when below this threshold, scores in descending order
break
coco_det = dict(
image_id=image_id,
bbox=det[0:4].tolist(),
score=score,
category_id=int(det[5]))
self.img_ids.append(image_id)
self.predictions.append(coco_det)
def evaluate(self):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
json.dump(self.predictions, open('./temp.json', 'w'), indent=4)
results = self.coco_api.loadRes('./temp.json')
coco_eval = COCOeval(self.coco_api, results, 'bbox')
coco_eval.params.imgIds = self.img_ids # score only ids we've used
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
metric = coco_eval.stats[0] # mAP 0.5-0.95
if self.waymo:
results = self._derive_coco_results(coco_eval, iou_type="bbox", class_names=['Vehicle', 'Pedestrian', 'Cyclist'])
if self.distributed:
dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)
else:
metric = torch.tensor(0, device=self.distributed_device)
dist.broadcast(metric, 0)
metric = metric.item()
self.reset()
return metric
def save_predictions(self, file_path):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
json.dump(self.predictions, open(file_path, 'w'), indent=4)
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
print("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
print(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
print("Note that some metrics cannot be computed.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
print("Per-category {} AP: \n".format(iou_type) + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
# get index for threshold closest to coco api iouThrs
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
# Per category waymo eval
waymo_results_per_category = []
# For waymo evaluation, we find AP at specific IoUs for each object
# Vehicle @ IoU 0.7, Pedestrian/Cyclist @ IoU 0.5
# IoU thresholds defined in coco api:
# iouThrs = np.array([0.5 , 0.55, 0.6 , 0.65, 0.7 , 0.75, 0.8 , 0.85, 0.9 , 0.95])
thresholds = [.7, .5, .5]
threshold_ids = [_get_thr_ind(coco_eval, thr) for thr in thresholds]
mean_precision = np.array([])
for idx, name in enumerate(class_names):
# get precision at specific iouThr
precision = precisions[threshold_ids[idx], :, idx, 0, -1]
# precision for a specific category and specific iou threshold
precision = precision[precision > -1]
mean_precision = np.append(mean_precision, precision)
ap = np.mean(precision) if precision.size else float("nan")
waymo_results_per_category.append(("{}".format(name), float(ap * 100)))
        # compute mAP in the Waymo evaluation format:
        #   AP averaged over all categories,
        #   at L2 difficulty (easy + hard detections),
        #   for ALL_NS (all categories except stop signs)
ap = np.mean(mean_precision) if mean_precision.size else float("nan")
waymo_results_per_category = [("L2_ALL_NS", float(ap * 100))] + waymo_results_per_category
# tabulate waymo evaluation results
results_flatten = list(itertools.chain(*waymo_results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::len(results_flatten)] for i in range(len(results_flatten))])
headers = [("category", "mAP")] + \
[("category", "AP @ IoU {}".format(coco_eval.params.iouThrs[threshold_ids[i]]))
for i in range(len(threshold_ids))]
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=list(itertools.chain(*headers)),
numalign="left",
)
print("Waymo Evaluation: {} AP: \n".format(iou_type) + table)
results.update({"WaymoAP" + name: ap for name, ap in waymo_results_per_category})
return results
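# Illustrative usage sketch (not part of the original file). The model call,
# data loader and annotation path are assumptions; `detections` is expected to
# be a float tensor shaped [batch_size, num_detections, 6] whose rows are laid
# out as (x, y, w, h, score, category_id), matching how add_predictions()
# slices each detection, and `targets` must carry an 'img_id' tensor.
def run_coco_evaluation(model, val_loader, annotation_file='instances_val.json'):
    from pycocotools.coco import COCO
    coco_api = COCO(annotation_file)
    evaluator = COCOEvaluator(coco_api, distributed=False)
    model.eval()
    with torch.no_grad():
        for images, targets in val_loader:
            detections = model(images)  # hypothetical model returning [batch, num_dets, 6]
            evaluator.add_predictions(detections, targets)
    return evaluator.evaluate()  # COCO mAP @ IoU 0.50:0.95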
class FastMapEvalluator(Evaluator):
def __init__(self, distributed=False):
super().__init__()
self.distributed = distributed
self.predictions = []
def add_predictions(self, output, target):
pass
def evaluate(self):
pass |
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf | tf | deploy_monolithic | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import json
import os
import tensorflow as tf
from tensorflow.python.saved_model import save_options
from nn.embedding import DualEmbeddingGroup
from nn.dense_model import DenseModel
class SparseModel(tf.keras.Model):
def __init__(self, cardinalities, output_dim, memory_threshold):
super().__init__()
self.cardinalities = cardinalities
self.output_dim = output_dim
self.embedding = DualEmbeddingGroup(cardinalities, output_dim, memory_threshold, use_mde_embeddings=False)
@tf.function
def call(self, x):
x = self.embedding(x)
x = tf.reshape(x, [-1, len(self.cardinalities) * self.output_dim])
return x
class Model(tf.keras.Model):
def __init__(self, sparse_submodel, dense_submodel, cpu):
super().__init__()
self.sparse_submodel = sparse_submodel
self.dense_submodel = dense_submodel
self.cpu = cpu
def call(self, numerical_features, cat_features):
device = '/CPU:0' if self.cpu else '/GPU:0'
with tf.device(device):
embedding_outputs = self.sparse_submodel(cat_features)
y = self.dense_submodel(numerical_features, embedding_outputs)
return y
def load_dense(src, model_precision, model_format):
dense_model = DenseModel.from_config(os.path.join(src, "config.json"))
if dense_model.amp and model_precision == "fp16" and model_format == 'tf-savedmodel':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
if dense_model.interaction == 'dot_custom_cuda':
dense_model.interaction = 'dot_tensorflow'
dense_model._create_interaction_op()
dense_model.load_weights(os.path.join(src, "dense"))
dense_model.transpose = False
dense_model.force_initialization(training=False)
return dense_model
def deploy_monolithic(
sparse_src,
dense_src,
dst,
model_name,
max_batch_size,
engine_count_per_device,
num_gpus=1,
version="1",
cpu=False,
model_precision='fp32'
):
if model_precision == 'fp16':
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
dense_model = load_dense(src=dense_src, model_precision=model_precision, model_format='tf-savedmodel')
print("deploy monolithic dst: ", dst)
with open(os.path.join(sparse_src, "config.json")) as f:
src_config = json.load(f)
num_cat_features = len(src_config["categorical_cardinalities"])
src_paths = [os.path.join(sparse_src, f"feature_{i}.npy") for i in range(num_cat_features)]
sparse_model = SparseModel(cardinalities=src_config["categorical_cardinalities"],
output_dim=src_config['embedding_dim'][0],
memory_threshold=75 if not cpu else 0)
model = Model(sparse_submodel=sparse_model, dense_submodel=dense_model, cpu=cpu)
dummy_batch_size = 65536
dummy_categorical = tf.zeros(shape=(dummy_batch_size, len(src_config["categorical_cardinalities"])), dtype=tf.int32)
dummy_numerical = tf.zeros(shape=(dummy_batch_size, dense_model.num_numerical_features), dtype=tf.float32)
_ = model(numerical_features=dummy_numerical, cat_features=dummy_categorical)
options = save_options.SaveOptions(experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES)
savedmodel_dir = os.path.join(dst, model_name, version, 'model.savedmodel')
os.makedirs(savedmodel_dir)
tf.keras.models.save_model(model=model, filepath=savedmodel_dir, overwrite=True, options=options)
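if __name__ == '__main__':
    # Illustrative invocation (not part of the original file). Every path, the
    # model name and the batch size below are placeholders and must be replaced
    # with real checkpoint directories and a real Triton model repository; the
    # keyword arguments simply mirror the deploy_monolithic() signature above.
    deploy_monolithic(
        sparse_src='/checkpoints/dlrm/sparse',
        dense_src='/checkpoints/dlrm/dense',
        dst='/models',  # Triton model repository root
        model_name='dlrm',
        max_batch_size=65536,
        engine_count_per_device=1,
        num_gpus=1,
        version='1',
        cpu=False,
        model_precision='fp32',
    )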
|
PyTorch/LanguageModeling/BERT/triton/large/runner | runner | start_NVIDIA-A30 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.large.runner.__main__" \
--config-path "triton/large/runner/config_NVIDIA-A30.yaml" \
--device 0 |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trtis | trtis | CustomOutputWriter | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CustomOutputWriter.hpp"
#include <algorithm>
#include <cstring>
using namespace tts;
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
CustomOutputWriter::CustomOutputWriter() :
TimedObject("CustomOutputWriter::write()")
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void CustomOutputWriter::write(
void* const outputContext,
CustomGetOutputFn_t outputFn,
const int batchSize,
const int samplesSpacing,
const float* const samplesHost,
const int32_t* const lengthsHost)
{
startTiming();
// determine maximum audio length
int32_t maxLength
= batchSize > 0 ? *std::max_element(lengthsHost, lengthsHost + batchSize)
: 0;
// output audio
{
std::vector<int64_t> outputDims{batchSize, static_cast<int64_t>(maxLength)};
float* hostMem;
if (!outputFn(
outputContext,
"OUTPUT",
outputDims.size(),
outputDims.data(),
sizeof(*samplesHost) * maxLength * batchSize,
(void**)&hostMem)) {
throw std::runtime_error("CustomGetOutputFn_t returned false.");
}
for (int i = 0; i < batchSize; ++i) {
std::memcpy(
hostMem + maxLength * i,
samplesHost + samplesSpacing * i,
maxLength * sizeof(*samplesHost));
}
}
// output lengths
{
std::vector<int64_t> lengthDims{batchSize, 1};
int32_t* hostMemLen;
if (!outputFn(
outputContext,
"OUTPUT_LENGTH",
lengthDims.size(),
lengthDims.data(),
sizeof(*lengthsHost) * batchSize,
(void**)&hostMemLen)) {
throw std::runtime_error("CustomGetOutputFn_t returned false.");
}
std::copy(lengthsHost, lengthsHost + batchSize, hostMemLen);
}
stopTiming();
}
|
PyTorch/LanguageModeling/BERT/distillation | distillation | hooks | # coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
class DistillHooks():
"""Implements hooks that can extract any intermediate
output/state in a model's forward pass for distillation.
"""
def __init__(self, config):
"""
Intermediate states extracted by `self.child_to_main_hook`
are saved in `module.distill_states_dict`
Intermediate nn.Module states extracted by `self.nn_module_hook`
as listed in `self.nn_module_states` are saved in `self.nn_module_states`
"""
#list of nn_module_names to register extraction hooks on in `self.register_nn_module_hook`
self.nn_module_names = config["nn_module_names"]
#Dictionary to store states extracted from nn module using `self.nn_module_hook`
self.nn_module_states = {}
def nn_module_hook(self, name):
"""
Method to cache output on nn.Module(s)
"""
def hook(module, input, output):
self.nn_module_states[name] = output
return hook
def register_nn_module_hook(self, module, input):
"""
Method to register hook on nn.module directly.
With this method, the user can obtain output from
an nn.Module without having to explicitly add lines
to cache state in the nn.Module itself, or when the user
has no access to the `forward` method of the module.
Example: models from torchvision
Typically used in models where the model definition
or the forward pass is inaccessible. Intermediate
states will be stored in self.nn_module_states
with the key being the name of the module.
Hook has to be deleted after the very first forward pass
to avoid registering `nn_module_hook` on modules listed in
`self.nn_module_names` with every fwd pass
Example:
model = MyModel()
distill_hooks = DistillHooks(config)
model_pre_hook = model.register_forward_pre_hook(distill_hooks.register_nn_module_hook)
for idx, batch in enumerate(train_dataloader):
if idx == 1:
model_pre_hook.remove()
"""
for name, i in module.named_modules():
if name in self.nn_module_names:
i.register_forward_hook(self.nn_module_hook(name))
print("registered `nn_module_hook` on", name)
def child_to_main_hook(self, module, input, output):
"""
Method to recursively fetch all intermediate states cached in children modules and store in parent module
"""
module.distill_states_dict = OrderedDict()
for name, i in module.named_modules():
if hasattr(i, 'distill_state_dict'):
module.distill_states_dict[name] = i.distill_state_dict
def flatten_states(state_dict, state_name):
"""
Method to iterate across all intermediate states cached in a dictionary,
extract a certain state based on `state_name` and append to a list
"""
extracted_states = []
for key, value in state_dict.items():
if state_name in value:
extracted_states.append(value[state_name])
return extracted_states
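# Usage sketch (illustrative only; `teacher`, `sample_batch`, the listed module names and the
# "hidden_states" key are placeholders, not part of this file):
#
#   config = {"nn_module_names": ["bert.encoder.layer.0", "bert.encoder.layer.11"]}
#   hooks = DistillHooks(config)
#
#   # register extraction hooks on the listed sub-modules during the very first forward pass
#   pre_hook = teacher.register_forward_pre_hook(hooks.register_nn_module_hook)
#   _ = teacher(sample_batch)
#   pre_hook.remove()
#
#   # gather per-child distillation states onto the parent after every forward pass
#   teacher.register_forward_hook(hooks.child_to_main_hook)
#   _ = teacher(sample_batch)
#   hidden = flatten_states(teacher.distill_states_dict, "hidden_states")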
|
PyTorch/Segmentation/MaskRCNN/pytorch/scripts | scripts | train | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#8 V100/A100 x 12 batch_per_gpu
GPU=8
CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml'
RESULTS='/results'
LOGFILE="$RESULTS/joblog.log"
if ! [ -d "$RESULTS" ]; then mkdir $RESULTS; fi
#Use a different argument with DATASET.TRAIN to use your own
python -m torch.distributed.launch --nproc_per_node=$GPU tools/train_net.py \
--config-file $CONFIG \
DTYPE "${DTYPE:-float16}" \
NHWC "${NHWC:-True}" \
DATALOADER.HYBRID "${HYBRID:-True}" \
OUTPUT_DIR $RESULTS \
| tee $LOGFILE
|
TensorFlow/LanguageModeling/BERT | BERT | optimization | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from horovod.tensorflow.compression import Compression
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, hvd=None, manual_fp16=False, use_fp16=False, num_accumulation_steps=1,
optimizer_type="adam", allreduce_post_accumulation=False, init_loss_scale=2**32):
"""Creates an optimizer training op."""
global_step = tf.compat.v1.train.get_or_create_global_step()
# avoid step change in learning rate at end of warmup phase
if optimizer_type == "adam":
power = 1.0
decayed_learning_rate_at_crossover_point = init_lr * (
(1.0 - float(num_warmup_steps) / float(num_train_steps)) ** power)
else:
power = 0.5
decayed_learning_rate_at_crossover_point = init_lr
adjusted_init_lr = init_lr * (init_lr / decayed_learning_rate_at_crossover_point)
print('decayed_learning_rate_at_crossover_point = %e, adjusted_init_lr = %e' % (decayed_learning_rate_at_crossover_point, adjusted_init_lr))
learning_rate = tf.constant(value=adjusted_init_lr, shape=[], dtype=tf.float32)
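# Worked example (illustrative numbers): with init_lr=1e-4, num_warmup_steps=1000,
# num_train_steps=10000 and power=1.0, the crossover LR is 1e-4 * 0.9 = 9e-5 and
# adjusted_init_lr = 1e-4 * (1e-4 / 9e-5) ~= 1.111e-4. The polynomial decay below then
# yields 1.111e-4 * 0.9 = 1e-4 at step 1000, exactly where linear warmup reaches
# init_lr, so the two schedules meet without a step change.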
# Implements linear decay of the learning rate.
learning_rate = tf.compat.v1.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=power,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
if optimizer_type == "lamb":
print("Initializing LAMB Optimizer")
optimizer = LAMBOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
print("Initializing ADAM Weight Decay Optimizer")
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if hvd is not None and (num_accumulation_steps == 1 or (not allreduce_post_accumulation)):
optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True, compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none)
if use_fp16:
loss_scaler = tf.train.experimental.DynamicLossScale(initial_loss_scale=init_loss_scale, increment_period=1000, multiplier=2.0)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler)
loss_scale_value = tf.identity(loss_scaler(), name="loss_scale")
if manual_fp16:
loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=init_loss_scale,
incr_every_n_steps=1000,
decr_every_n_nan_or_inf=2,
decr_ratio=0.5)
optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
tvars = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss * 1.0 / num_accumulation_steps, tvars)
if num_accumulation_steps > 1:
local_step = tf.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
initializer=tf.zeros_initializer)
batch_finite = tf.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
initializer=tf.ones_initializer)
accum_vars = [tf.get_variable(
name=tvar.name.split(":")[0] + "/accum",
shape=tvar.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()) for tvar in tf.trainable_variables()]
reset_step = tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool)
local_step = tf.cond(reset_step, lambda:local_step.assign(tf.ones_like(local_step)), lambda:local_step.assign_add(1))
grads_and_vars_and_accums = [(gv[0],gv[1],accum_vars[i]) for i, gv in enumerate(grads_and_vars) if gv[0] is not None]
grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads]) if manual_fp16 or use_fp16 else tf.constant(True, dtype=tf.bool)
batch_finite = tf.cond(reset_step,
lambda: batch_finite.assign(tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite)),
lambda:batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hissy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=1.0,
use_norm=tf.cond(
all_are_finite,
lambda: tf.global_norm(grads),
lambda: tf.constant(1.0)))
accum_vars = tf.cond(reset_step,
lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
def update(accum_vars):
if allreduce_post_accumulation and hvd is not None:
accum_vars = [hvd.allreduce(tf.convert_to_tensor(accum_var), compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none) if isinstance(accum_var, tf.IndexedSlices)
else hvd.allreduce(accum_var, compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none) for accum_var in accum_vars]
return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
update_step = tf.identity(tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool), name="update_step")
update_op = tf.cond(update_step,
lambda: update(accum_vars), lambda: tf.no_op())
new_global_step = tf.cond(tf.math.logical_and(update_step,
tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool) if hvd is not None else batch_finite),
lambda: global_step+1,
lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
grads, tvars = list(zip(*grads_and_vars))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if use_fp16 or manual_fp16 else tf.constant(True, dtype=tf.bool)
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hissy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=1.0,
use_norm=tf.cond(
all_are_finite,
lambda: tf.global_norm(grads),
lambda: tf.constant(1.0)))
train_op = optimizer.apply_gradients(
list(zip(clipped_grads, tvars)), global_step=global_step)
new_global_step = tf.cond(all_are_finite, lambda: global_step + 1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = tf.identity(learning_rate, name='learning_rate')
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None,
manual_fp16=False):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
if has_shadow:
# create shadow fp32 weights for fp16 variable
param_fp32 = tf.get_variable(
name=param_name + "/shadow",
dtype=tf.float32,
trainable=False,
initializer=tf.cast(param.initialized_value(),tf.float32))
else:
param_fp32 = param
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
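# Net effect of the update below: param <- param - lr * (adam_update + weight_decay_rate * param)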
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param_fp32
update_with_lr = self.learning_rate * update
next_param = param_fp32 - update_with_lr
if has_shadow:
# cast shadow fp32 weights to fp16 and assign to trainable variable
param.assign(tf.cast(next_param, param.dtype.base_dtype))
assignments.extend(
[param_fp32.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
class LAMBOptimizer(tf.compat.v1.train.Optimizer):
"""A LAMB optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="LAMBOptimizer"):
"""Constructs a LAMBOptimizer."""
super(LAMBOptimizer, self).__init__(False, name)
self.learning_rate = tf.identity(learning_rate, name='learning_rate')
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step, name=None,
manual_fp16=False):
"""See base class."""
assignments = []
steps = tf.cast(global_step, tf.float32)
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
if has_shadow:
# create shadow fp32 weights for fp16 variable
param_fp32 = tf.get_variable(
name=param_name + "/shadow",
dtype=tf.float32,
trainable=False,
initializer=tf.cast(param.initialized_value(),tf.float32))
else:
param_fp32 = param
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# LAMB update
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
beta1_correction = (1 - self.beta_1 ** steps)
beta2_correction = (1 - self.beta_2 ** steps)
next_m_unbiased = next_m / beta1_correction
next_v_unbiased = next_v / beta2_correction
update = next_m_unbiased / (tf.sqrt(next_v_unbiased) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
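# LAMB additionally scales the step by the layerwise trust ratio ||param|| / ||update||
# computed below, falling back to 1.0 whenever either norm is zero.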
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param_fp32
w_norm = linalg_ops.norm(param, ord=2)
g_norm = linalg_ops.norm(update, ord=2)
ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(
math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0)
update_with_lr = ratio * self.learning_rate * update
next_param = param_fp32 - update_with_lr
if has_shadow:
# cast shadow fp32 weights to fp16 and assign to trainable variable
param.assign(tf.cast(next_param, param.dtype.base_dtype))
assignments.extend(
[param_fp32.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
|
PyTorch/Forecasting/TFT | TFT | ema | # Copyright 2021-2022 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 Ross Wightman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Exponential Moving Average (EMA) of model updates
"""
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
class ModelEma(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
"""
def __init__(self, model, decay=0.999, device=None):
super().__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def update(self, model):
update_fn=lambda ema_v, model_v: self.decay * ema_v + (1. - self.decay) * model_v
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def set(self, model):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_( model_v )
def forward(self, x):
return self.module(x)
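# Usage sketch (illustrative; `model`, `optimizer`, `loader` and `loss_fn` are placeholders):
#
#   ema = ModelEma(model, decay=0.999)
#   for x, y in loader:
#       loss = loss_fn(model(x), y)
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       ema.update(model)      # EMA weights track the live model after each step
#   val_out = ema(x)           # evaluate with the averaged weights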
|
PyTorch/Segmentation/nnUNet/triton | triton | model | from nnunet.nn_unet import NNUnet
def get_model(*, checkpoint_dir: str, precision: str, data_dir: str):
model = NNUnet.load_from_checkpoint(checkpoint_dir, data_dir=data_dir, triton=True, strict=False)
model = model.cuda()
if "fp16" in precision:
model = model.half()
model.eval()
tensor_names = {"inputs": ["INPUT__0"], "outputs": ["OUTPUT__0"]}
return model, tensor_names
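# Usage sketch (illustrative paths only):
#   model, tensor_names = get_model(checkpoint_dir="/results/checkpoints/last.ckpt",
#                                   precision="fp16", data_dir="/data/01_3d")
#   # tensor_names maps the Triton tensor names to the model's single input and output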
|
PyTorch/Detection/Efficientdet/data | data | transforms | """ COCO transforms (quick and dirty)
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from PIL import Image
import numpy as np
import random
import math
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
class ImageToNumpy:
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return np_img, annotations
class ImageToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype), annotations
class TargetToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
annotations['bbox'] = torch.from_numpy(annotations['bbox']).to(dtype=self.dtype)
annotations['cls'] = torch.from_numpy(annotations['cls']).to(dtype=torch.int64)
return pil_img, annotations
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
def clip_boxes_(boxes, img_size):
height, width = img_size
clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)
np.clip(boxes, 0, clip_upper, out=boxes)
def clip_boxes(boxes, img_size):
clipped_boxes = boxes.copy()
clip_boxes_(clipped_boxes, img_size)
return clipped_boxes
def _size_tuple(size):
if isinstance(size, int):
return size, size
else:
assert len(size) == 2
return size
class ResizePad:
def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.interpolation = interpolation
self.fill_color = fill_color
def __call__(self, img, anno: dict):
width, height = img.size
img_scale_y = self.target_size[0] / height
img_scale_x = self.target_size[1] / width
img_scale = min(img_scale_y, img_scale_x)
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
new_img.paste(img)
if 'bbox' in anno:
# FIXME haven't tested this path since not currently using dataset annotations for train/eval
bbox = anno['bbox']
bbox[:, :4] *= img_scale
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomResizePad:
def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'bilinear',
fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.scale = scale
self.interpolation = interpolation
self.fill_color = fill_color
def _get_params(self, img):
# Select a random scale factor.
scale_factor = random.uniform(*self.scale)
scaled_target_height = scale_factor * self.target_size[0]
scaled_target_width = scale_factor * self.target_size[1]
# Recompute the accurate scale_factor using rounded scaled image size.
width, height = img.size
img_scale_y = scaled_target_height / height
img_scale_x = scaled_target_width / width
img_scale = min(img_scale_y, img_scale_x)
# Select non-zero random offset (x, y) if scaled image is larger than target size
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
offset_y = scaled_h - self.target_size[0]
offset_x = scaled_w - self.target_size[1]
offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))
offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))
return scaled_h, scaled_w, offset_y, offset_x, img_scale
def __call__(self, img, anno: dict):
scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])
img = img.crop((offset_x, offset_y, right, lower))
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
new_img.paste(img)
if 'bbox' in anno:
# FIXME not fully tested
bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace
bbox[:, :4] *= img_scale
box_offset = np.stack([offset_y, offset_x] * 2)
bbox -= box_offset
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomFlip:
def __init__(self, horizontal=True, vertical=False, prob=0.5):
self.horizontal = horizontal
self.vertical = vertical
self.prob = prob
def _get_params(self):
do_horizontal = random.random() < self.prob if self.horizontal else False
do_vertical = random.random() < self.prob if self.vertical else False
return do_horizontal, do_vertical
def __call__(self, img, annotations: dict):
do_horizontal, do_vertical = self._get_params()
width, height = img.size
def _fliph(bbox):
x_max = width - bbox[:, 1]
x_min = width - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
def _flipv(bbox):
y_max = height - bbox[:, 0]
y_min = height - bbox[:, 2]
bbox[:, 0] = y_min
bbox[:, 2] = y_max
if do_horizontal and do_vertical:
img = img.transpose(Image.ROTATE_180)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
_flipv(annotations['bbox'])
elif do_horizontal:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
elif do_vertical:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
if 'bbox' in annotations:
_flipv(annotations['bbox'])
return img, annotations
def resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):
if isinstance(fill_color, tuple):
assert len(fill_color) == 3
fill_color = fill_color
else:
try:
int_color = int(fill_color)
fill_color = (int_color,) * 3
except ValueError:
assert fill_color == 'mean'
fill_color = tuple([int(round(255 * x)) for x in img_mean])
return fill_color
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, img, annotations: dict):
for t in self.transforms:
img, annotations = t(img, annotations)
return img, annotations
def transforms_coco_eval(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
ResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
TargetToTensor(),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
def transforms_coco_train(
img_size=224,
interpolation='random',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
RandomFlip(horizontal=True, prob=0.5),
RandomResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
TargetToTensor(),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
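# Usage sketch (illustrative; the PIL image and annotation arrays are placeholders):
#
#   train_tf = transforms_coco_train(img_size=512, fill_color='mean', use_prefetcher=True)
#   np_img, anno = train_tf(pil_image, {'bbox': boxes_yxyx, 'cls': class_ids})
#   # np_img is a CHW uint8 numpy array; boxes are [ymin, xmin, ymax, xmax] numpy arrays,
#   # as assumed by the flip/clip helpers above. Mean/std normalization is expected to
#   # happen later in the prefetcher, which is why use_prefetcher must be True here.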
|
PyTorch/SpeechRecognition/QuartzNet | QuartzNet | nemo_dle_model_converter | import argparse
import io
import sys
import tarfile
from copy import deepcopy
from functools import reduce
from pathlib import Path
from subprocess import CalledProcessError, check_output
import torch
import yaml
import quartznet.config
from common import helpers
from common.features import FilterbankFeatures
from quartznet.config import load as load_yaml
from quartznet.model import QuartzNet, MaskedConv1d
# Corresponding DLE <-> NeMo config keys
cfg_key_map = {
("input_val", "audio_dataset", "sample_rate"): ("preprocessor", "sample_rate"),
("input_val", "filterbank_features", "dither"): ("preprocessor", "dither"),
("input_val", "filterbank_features", "frame_splicing"): ("preprocessor", "frame_splicing"),
("input_val", "filterbank_features", "n_fft"): ("preprocessor", "n_fft"),
("input_val", "filterbank_features", "n_filt"): ("preprocessor", "features"),
("input_val", "filterbank_features", "normalize"): ("preprocessor", "normalize"),
("input_val", "filterbank_features", "sample_rate"): ("preprocessor", "sample_rate"),
("input_val", "filterbank_features", "window"): ("preprocessor", "window"),
("input_val", "filterbank_features", "window_size"): ("preprocessor", "window_size"),
("input_val", "filterbank_features", "window_stride"): ("preprocessor", "window_stride"),
("labels",): ("decoder", "vocabulary"),
("quartznet", "decoder", "in_feats"): ("decoder", "feat_in"),
("quartznet", "encoder", "activation"): ("encoder", "activation"),
("quartznet", "encoder", "blocks"): ("encoder", "jasper"),
("quartznet", "encoder", "frame_splicing"): ("preprocessor", "frame_splicing"),
("quartznet", "encoder", "in_feats"): ("encoder", "feat_in"),
("quartznet", "encoder", "use_conv_masks"): ("encoder", "conv_mask"),
}
def load_nemo_ckpt(fpath):
"""Make a DeepLearningExamples state_dict and config from a .nemo file."""
try:
cmd = ['tar', 'Oxzf', fpath, './model_config.yaml']
nemo_cfg = yaml.safe_load(io.BytesIO(check_output(cmd)))
cmd = ['tar', 'Oxzf', fpath, './model_weights.ckpt']
ckpt = torch.load(io.BytesIO(check_output(cmd)), map_location="cpu")
except (FileNotFoundError, CalledProcessError):
print('WARNING: Could not uncompress with tar. '
'Falling back to the tarfile module (might take a few minutes).')
import tarfile
with tarfile.open(fpath, "r:gz") as tar:
f = tar.extractfile(tar.getmember("./model_config.yaml"))
nemo_cfg = yaml.safe_load(f)
f = tar.extractfile(tar.getmember("./model_weights.ckpt"))
ckpt = torch.load(f, map_location="cpu")
remap = lambda k: (k.replace("encoder.encoder", "encoder.layers")
.replace("decoder.decoder_layers", "decoder.layers")
.replace("conv.weight", "weight"))
dle_ckpt = {'state_dict': {remap(k): v for k, v in ckpt.items()
if "preproc" not in k}}
dle_cfg = config_from_nemo(nemo_cfg)
return dle_ckpt, dle_cfg
def save_nemo_ckpt(dle_ckpt, dle_cfg, dest_path):
"""Save a DeepLearningExamples model as a .nemo file."""
cfg = deepcopy(dle_cfg)
dle_ckpt = torch.load(dle_ckpt, map_location="cpu")["ema_state_dict"]
# Build a DLE model instance and fill with weights
symbols = helpers.add_ctc_blank(cfg['labels'])
enc_kw = quartznet.config.encoder(cfg)
dec_kw = quartznet.config.decoder(cfg, n_classes=len(symbols))
model = QuartzNet(enc_kw, dec_kw)
model.load_state_dict(dle_ckpt, strict=True)
# Rename core modules, e.g., encoder.layers -> encoder.encoder
model.encoder._modules['encoder'] = model.encoder._modules.pop('layers')
model.decoder._modules['decoder_layers'] = model.decoder._modules.pop('layers')
# MaskedConv1d is made via composition in NeMo, and via inheritance in DLE
# Params for MaskedConv1d in NeMo have an additional '.conv.' infix
def rename_convs(module):
for name in list(module._modules.keys()):
submod = module._modules[name]
if isinstance(submod, MaskedConv1d):
module._modules[f'{name}.conv'] = module._modules.pop(name)
else:
rename_convs(submod)
rename_convs(model.encoder.encoder)
# Use FilterbankFeatures to calculate fbanks and store with model weights
feature_processor = FilterbankFeatures(
**dle_cfg['input_val']['filterbank_features'])
nemo_ckpt = model.state_dict()
nemo_ckpt["preprocessor.featurizer.fb"] = feature_processor.fb
nemo_ckpt["preprocessor.featurizer.window"] = feature_processor.window
nemo_cfg = config_to_nemo(dle_cfg)
# Prepare the directory for zipping
ckpt_files = dest_path / "ckpt_files"
ckpt_files.mkdir(exist_ok=True, parents=False)
with open(ckpt_files / "model_config.yaml", "w") as f:
yaml.dump(nemo_cfg, f)
torch.save(nemo_ckpt, ckpt_files / "model_weights.ckpt")
with tarfile.open(dest_path / "quartznet.nemo", "w:gz") as tar:
tar.add(ckpt_files, arcname="./")
def save_dle_ckpt(ckpt, cfg, dest_dir):
torch.save(ckpt, dest_dir / "model.pt")
with open(dest_dir / "model_config.yaml", "w") as f:
yaml.dump(cfg, f)
def set_nested_item(tgt, src, tgt_keys, src_keys):
"""Assigns nested dict keys, e.g., d1[a][b][c] = d2[e][f][g][h]."""
tgt_nested = reduce(lambda d, k: d[k], tgt_keys[:-1], tgt)
tgt_nested[tgt_keys[-1]] = reduce(lambda d, k: d[k], src_keys, src)
def config_from_nemo(nemo_cfg):
"""Convert a DeepLearningExamples config to a NeMo format."""
dle_cfg = {
'name': 'QuartzNet',
'input_val': {
'audio_dataset': {
'normalize_transcripts': True,
},
'filterbank_features': {
'pad_align': 16,
},
},
'quartznet': {
'decoder': {},
'encoder': {},
},
}
for dle_keys, nemo_keys in cfg_key_map.items():
try:
set_nested_item(dle_cfg, nemo_cfg, dle_keys, nemo_keys)
except KeyError:
print(f'WARNING: Could not load config {nemo_keys} as {dle_keys}.')
# mapping kernel_size is not expressable with cfg_map
for block in dle_cfg["quartznet"]["encoder"]["blocks"]:
block["kernel_size"] = block.pop("kernel")
return dle_cfg
def config_to_nemo(dle_cfg):
"""Convert a DeepLearningExamples config to a NeMo format."""
nemo_cfg = {
"target": "nemo.collections.asr.models.ctc_models.EncDecCTCModel",
"dropout": 0.0,
"preprocessor": {
"_target_": "nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor",
"stft_conv": False,
},
"encoder": {
"_target_": "nemo.collections.asr.modules.ConvASREncoder",
"jasper": {}
},
"decoder": {
"_target_": "nemo.collections.asr.modules.ConvASRDecoder",
},
}
for dle_keys, nemo_keys in cfg_key_map.items():
try:
set_nested_item(nemo_cfg, dle_cfg, nemo_keys, dle_keys)
except KeyError:
print(f"WARNING: Could not load config {dle_keys} as {nemo_keys}.")
nemo_cfg["sample_rate"] = nemo_cfg["preprocessor"]["sample_rate"]
nemo_cfg["repeat"] = nemo_cfg["encoder"]["jasper"][1]["repeat"]
nemo_cfg["separable"] = nemo_cfg["encoder"]["jasper"][1]["separable"]
nemo_cfg["labels"] = nemo_cfg["decoder"]["vocabulary"]
nemo_cfg["decoder"]["num_classes"] = len(nemo_cfg["decoder"]["vocabulary"])
# mapping kernel_size is not expressable with cfg_map
for block in nemo_cfg["encoder"]["jasper"]:
if "kernel_size" in block:
block["kernel"] = block.pop("kernel_size")
return nemo_cfg
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="QuartzNet DLE <-> NeMo model converter.")
parser.add_argument("source_model", type=Path,
help="A DLE or NeMo QuartzNet model to be converted (.pt or .nemo, respectively)")
parser.add_argument("dest_dir", type=Path, help="Destination directory")
parser.add_argument("--dle_config_yaml", type=Path,
help="A DLE config .yaml file, required only to convert DLE -> NeMo")
args = parser.parse_args()
ext = args.source_model.suffix.lower()
if ext == ".nemo":
ckpt, cfg = load_nemo_ckpt(args.source_model)
save_dle_ckpt(ckpt, cfg, args.dest_dir)
elif ext == ".pt":
dle_cfg = load_yaml(args.dle_config_yaml)
save_nemo_ckpt(args.source_model, dle_cfg, args.dest_dir)
else:
raise ValueError(f"Unknown extension {ext}.")
print('Converted successfully.')
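# Example invocations (illustrative paths only):
#   python nemo_dle_model_converter.py pretrained/quartznet.nemo ./converted_dle/
#   python nemo_dle_model_converter.py results/quartznet.pt ./converted_nemo/ --dle_config_yaml configs/quartznet15x5.yaml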
|
PyTorch/LanguageModeling/Transformer-XL/pytorch | pytorch | run_text8_base | #!/bin/bash
export OMP_NUM_THREADS=1
if [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--cuda \
--data ../data/text8/ \
--dataset text8 \
--n_layer 12 \
--d_model 512 \
--n_head 8 \
--d_head 64 \
--d_inner 2048 \
--dropout 0.1 \
--dropatt 0.0 \
--optim adam \
--lr 0.00025 \
--warmup_step 0 \
--max_step 400000 \
--tgt_len 512 \
--mem_len 512 \
--eval_tgt_len 128 \
--batch_size 22 \
--multi_gpu \
--gpu0_bsz 4 \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python eval.py \
--cuda \
--data ../data/text8/ \
--dataset text8 \
--tgt_len 80 \
--mem_len 2100 \
--clamp_len 820 \
--same_length \
--split test \
${@:2}
else
echo 'unknown argument 1'
fi
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/utils | utils | distributed | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import horovod.tensorflow as hvd
def dist_print(*args, force=False, **kwargs):
if hvd.rank() == 0 or force:
print(*args, **kwargs)
|
TensorFlow2/Segmentation/nnUNet | nnUNet | evaluate | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import nibabel
import numpy as np
from tqdm import tqdm
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--preds", type=str, required=True, help="Path to predictions")
parser.add_argument("--lbls", type=str, required=True, help="Path to labels")
def get_stats(pred, targ, class_idx):
tp_ = np.logical_and(pred == class_idx, targ == class_idx).sum()
fn_ = np.logical_and(pred != class_idx, targ == class_idx).sum()
fp_ = np.logical_and(pred == class_idx, targ != class_idx).sum()
return tp_, fn_, fp_
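# Dice for class i is derived from these counts below as 2*TP / (2*TP + FP + FN),
# e.g. TP=8, FP=2, FN=2 gives 16/20 = 0.8.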
if __name__ == "__main__":
args = parser.parse_args()
y_pred = sorted(glob.glob(os.path.join(args.preds, "*.npy")))
y_true = [os.path.join(args.lbls, os.path.basename(pred).replace("npy", "nii.gz")) for pred in y_pred]
assert len(y_pred) > 0
n_class = np.load(y_pred[0]).shape[0] - 1
dice = [[] for _ in range(n_class)]
for pr, lb in tqdm(zip(y_pred, y_true), total=len(y_pred)):
prd = np.transpose(np.argmax(np.load(pr), axis=0), (2, 1, 0))
lbl = nibabel.load(lb).get_fdata().astype(np.uint8)
for i in range(1, n_class + 1):
counts = np.count_nonzero(lbl == i) + np.count_nonzero(prd == i)
if counts == 0: # no foreground class
dice[i - 1].append(1)
else:
tp, fn, fp = get_stats(prd, lbl, i)
denum = 2 * tp + fp + fn
dice[i - 1].append(2 * tp / denum if denum != 0 else 0)
dice_score = np.mean(np.array(dice), axis=-1)
dice_cls = " ".join([f"L{i+1} {round(dice_score[i], 4)}" for i, dice in enumerate(dice_score)])
print(f"mean dice: {round(np.mean(dice_score), 4)} - {dice_cls}") |
PyTorch/Recommendation/DLRM/tests/feature_specs | feature_specs | 20_num | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
- num_13
- num_14
- num_15
- num_16
- num_17
- num_18
- num_19
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100001
dtype: int32
cat_10.bin:
cardinality: 100010
dtype: int32
cat_11.bin:
cardinality: 100011
dtype: int32
cat_12.bin:
cardinality: 100012
dtype: int32
cat_13.bin:
cardinality: 100013
dtype: int32
cat_14.bin:
cardinality: 100014
dtype: int32
cat_15.bin:
cardinality: 100015
dtype: int32
cat_16.bin:
cardinality: 100016
dtype: int32
cat_17.bin:
cardinality: 100017
dtype: int32
cat_18.bin:
cardinality: 100018
dtype: int32
cat_19.bin:
cardinality: 100019
dtype: int32
cat_2.bin:
cardinality: 100002
dtype: int32
cat_20.bin:
cardinality: 100020
dtype: int32
cat_21.bin:
cardinality: 100021
dtype: int32
cat_22.bin:
cardinality: 100022
dtype: int32
cat_23.bin:
cardinality: 100023
dtype: int32
cat_24.bin:
cardinality: 100024
dtype: int32
cat_25.bin:
cardinality: 100025
dtype: int32
cat_3.bin:
cardinality: 100003
dtype: int32
cat_4.bin:
cardinality: 100004
dtype: int32
cat_5.bin:
cardinality: 100005
dtype: int32
cat_6.bin:
cardinality: 100006
dtype: int32
cat_7.bin:
cardinality: 100007
dtype: int32
cat_8.bin:
cardinality: 100008
dtype: int32
cat_9.bin:
cardinality: 100009
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_13:
dtype: float16
num_14:
dtype: float16
num_15:
dtype: float16
num_16:
dtype: float16
num_17:
dtype: float16
num_18:
dtype: float16
num_19:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | category_util_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.category_util."""
import os
import tensorflow as tf
from object_detection.utils import category_util
class EvalUtilTest(tf.test.TestCase):
def test_load_categories_from_csv_file(self):
csv_data = """
0,"cat"
1,"dog"
2,"bird"
""".strip(' ')
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
with tf.gfile.Open(csv_path, 'wb') as f:
f.write(csv_data)
categories = category_util.load_categories_from_csv_file(csv_path)
self.assertTrue({'id': 0, 'name': 'cat'} in categories)
self.assertTrue({'id': 1, 'name': 'dog'} in categories)
self.assertTrue({'id': 2, 'name': 'bird'} in categories)
def test_save_categories_to_csv_file(self):
categories = [
{'id': 0, 'name': 'cat'},
{'id': 1, 'name': 'dog'},
{'id': 2, 'name': 'bird'},
]
csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
category_util.save_categories_to_csv_file(categories, csv_path)
saved_categories = category_util.load_categories_from_csv_file(csv_path)
self.assertEqual(saved_categories, categories)
if __name__ == '__main__':
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/training | training | checkpoint_utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import dllogger
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from hydra.utils import get_original_cwd
from omegaconf import OmegaConf
from loggers.log_helper import restart_logger
def save_checkpoint(trainer, filename="checkpoint.zip", checkpoint_dir="."):
if trainer.ema:
module_to_save = trainer.ema.module
elif isinstance(trainer.model, DDP):
module_to_save = trainer.model.module
else:
module_to_save = trainer.model
state = {
"epoch": trainer.epoch + 1,
"global_step": trainer.global_step,
"model_state_dict": module_to_save.state_dict(),
"optimizer_state_dict": trainer.optimizer.state_dict(),
}
checkpoint_path = os.path.join(checkpoint_dir, filename)
trainer.logger.log(step='event', data={"String": f"Saving checkpoint to {filename}"}, verbosity=dllogger.Verbosity.DEFAULT)
torch.save(state, checkpoint_path)
def maybe_restore_checkpoint(trainer, checkpoint_path):
if checkpoint_path and os.path.isfile(checkpoint_path):
trainer.logger.log(
step='event',
data={"String": f"Restoring checkpoint from {checkpoint_path}"},
verbosity=dllogger.Verbosity.DEFAULT
)
checkpoint = torch.load(checkpoint_path, map_location=trainer.device)
trainer.model.load_state_dict(checkpoint["model_state_dict"])
trainer.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
trainer.global_step = checkpoint["global_step"]
trainer.epoch = checkpoint["epoch"]
def trim_json_log(log_path):
"""
Loads dllogger's json log and returns its lines without unfinished epochs.
Does not modify the logfile
"""
if os.path.isfile(log_path):
with open(log_path, 'r') as f:
lines = f.readlines()
# In case log file is newly created
if not lines:
return lines
for i, l in enumerate(reversed(lines)):
d = json.loads(l[4:])
if d.get('step') == []:
return lines
if 'data' in d and 'String' in d['data'] and 'Epoch' in d['data']['String']:
break
lines = lines[:-i-1]
return lines
return []
def detect_duplicated_run():
"""
Returns list of paths of the runs with the same config as provided
"""
# This is meant to be called in a trainer class, which means that this doesn't have access to the top level config
current_config = OmegaConf.load('.hydra/config.yaml')
rel = os.path.relpath(os.getcwd(), get_original_cwd())
rel = next(x for x in rel.split(os.path.sep))
result_dir = os.path.join(get_original_cwd(), rel)
duplicated = []
for p, s, f in os.walk(result_dir):
if '.hydra' in s:
c = OmegaConf.load(os.path.join(p, '.hydra/config.yaml'))
if hash(c) == hash(current_config):
duplicated.append(p)
# Don't take into account runs that ended before any checkpoint had been saved
# or current run (at this point hydra's config has already been saved)
duplicated = [p for p in duplicated if os.path.exists(os.path.join(p,'last_checkpoint.zip'))]
return duplicated
def get_most_advanced_run(paths, logfile_name):
adv = 0
path = ''
for p in paths:
log_path = os.path.join(p, logfile_name)
log_lines = trim_json_log(log_path)
if len(log_lines) > adv:
adv = len(log_lines)
path = p
return path
def maybe_continue_run(trainer):
duplicates = detect_duplicated_run()
if not duplicates:
return
logfile_name = trainer.config.get('logfile_name', 'log.json')
unfinished_run_path = get_most_advanced_run(duplicates, logfile_name)
checkpoint_path = os.path.join(unfinished_run_path, 'last_checkpoint.zip')
best_checkpoint_path = os.path.join(unfinished_run_path, 'best_checkpoint.zip')
maybe_restore_checkpoint(trainer, checkpoint_path)
log_lines = trim_json_log(os.path.join(unfinished_run_path, logfile_name))
# Reinitialize the logger. This will cause it to append to the copied log file.
with open(logfile_name, 'w') as f:
f.writelines(log_lines)
trainer.logger = restart_logger(trainer.config, trainer.logger)
trainer.logger.log(
step='event',
data={"String": f"Resuming run: {unfinished_run_path}"},
verbosity=dllogger.Verbosity.DEFAULT
)
shutil.copyfile(checkpoint_path, 'last_checkpoint.zip')
shutil.copyfile(best_checkpoint_path, 'best_checkpoint.zip')
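# Typical call order in a trainer (illustrative; `trainer` is assumed to expose the
# model/optimizer/logger/config attributes used above):
#
#   maybe_continue_run(trainer)   # resume if an identical config already produced checkpoints
#   for epoch in range(trainer.epoch, num_epochs):
#       ... train one epoch ...
#       save_checkpoint(trainer, filename="last_checkpoint.zip")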
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | configuration | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_utils import InputTypes, DataTypes, FeatureSpec
import datetime
class ElectricityConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('power_usage', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('hour', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'days_from_start' # This column contains time indices across which we split the data
self.train_range = (1096, 1315)
self.valid_range = (1308, 1339)
self.test_range = (1332, 1346)
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = True
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [369]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.1
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class VolatilityConfig():
def __init__(self):
self.features = [
FeatureSpec('Symbol', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('days_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('log_vol', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('open_to_close', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('days_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('day_of_month', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('week_of_year', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('month', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('Region', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'date' # This column contains time indices across which we split the data
self.train_range = ('2000-01-01', '2016-01-01')
self.valid_range = ('2016-01-01', '2018-01-01')
self.test_range = ('2018-01-01', '2019-06-28')
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = False
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [4]
self.temporal_known_categorical_inp_lens = [7,31,53,12]
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 257
self.encoder_length = 252
self.n_head = 4
self.hidden_size = 96
self.dropout = 0.4
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class TrafficConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('values', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('time_on_day', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'sensor_day' # This column contains time indices across which we split the data
self.train_range = (0, 151)
self.valid_range = (144, 166)
self.test_range = (159, float('inf'))
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = False
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [963]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.3
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class FavoritaConfig():
def __init__(self):
self.features = [
FeatureSpec('traj_id', InputTypes.ID, DataTypes.CATEGORICAL),
#FeatureSpec('days_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('date', InputTypes.TIME, DataTypes.DATE),
FeatureSpec('log_sales', InputTypes.TARGET, DataTypes.CONTINUOUS),
# XXX for no apparent reason TF implementation doesn't scale day_of_month
# and month variables. We probably should set them to be categorical
FeatureSpec('day_of_month', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('month', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('onpromotion', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('national_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('regional_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('local_hol', InputTypes.KNOWN, DataTypes.CATEGORICAL),
FeatureSpec('open', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('transactions', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('oil', InputTypes.OBSERVED, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('item_nbr', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('store_nbr', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('city', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('state', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('type', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('cluster', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('family', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('class', InputTypes.STATIC, DataTypes.CATEGORICAL),
FeatureSpec('perishable', InputTypes.STATIC, DataTypes.CATEGORICAL)
]
# Dataset split boundaries
self.time_ids = 'date' # This column contains time indices across which we split the data
# When relative_split is set, a valid_boundary must be provided.
# The validation split is shifted from the train split by the number of forecast steps into the future.
# The test split is shifted by the number of forecast steps from the validation split.
self.relative_split = True
self.valid_boundary = str(datetime.datetime(2015, 12, 1))
self.train_range = None
self.valid_range = None
self.test_range = None
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = True
self.missing_cat_data_strategy='encode_all'
self.missing_id_strategy = 'drop'
# Feature sizes
self.static_categorical_inp_lens = [90200, 3426, 53, 22, 16, 5, 17, 32, 313, 2]
self.temporal_known_categorical_inp_lens = [2, 7, 55, 5, 25]
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 120
self.encoder_length = 90
self.n_head = 4
self.hidden_size = 240
self.dropout = 0.1
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
CONFIGS = {'electricity': ElectricityConfig,
'volatility': VolatilityConfig,
'traffic': TrafficConfig,
'favorita': FavoritaConfig,
}
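# A minimal sketch of using the registry above (the dataset key is illustrative):
#
#   config = CONFIGS['electricity']()
#   print(config.num_historic_vars, config.num_future_vars, config.num_static_vars)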
|
TensorFlow/Classification/ConvNets/runtime | runtime | __init__ | from runtime.runner import Runner |
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine | engine | __init__ | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
TensorFlow/Segmentation/VNet/model | model | vnet | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.layers import input_block, downsample_block, upsample_block, output_block
class Builder():
def __init__(self, kernel_size, n_classes, upscale_blocks, downscale_blocks, upsampling, pooling, normalization,
activation, mode):
self._kernel_size = kernel_size
self._pooling = pooling
self._upsampling = upsampling
self._normalization = normalization
self._activation = activation
self._mode = mode
self._n_classes = n_classes
self._downscale_blocks = downscale_blocks
self._upscale_blocks = upscale_blocks
def __call__(self, features):
x = input_block(inputs=features,
filters=16,
kernel_size=self._kernel_size,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
skip_connections = [x]
for depth in self._downscale_blocks:
x = downsample_block(inputs=x,
depth=depth,
kernel_size=self._kernel_size,
pooling=self._pooling,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
skip_connections.append(x)
del skip_connections[-1]
for depth in self._upscale_blocks:
x = upsample_block(inputs=x,
residual_inputs=skip_connections.pop(),
depth=depth,
upsampling=self._upsampling,
kernel_size=self._kernel_size,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
return output_block(inputs=x,
residual_inputs=skip_connections.pop(),
kernel_size=self._kernel_size,
n_classes=self._n_classes,
upsampling=self._upsampling,
normalization=self._normalization,
activation=self._activation,
mode=self._mode)
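# A minimal construction sketch (argument values are illustrative, not taken
# from this repository's configuration files):
#
#   builder = Builder(kernel_size=3, n_classes=4,
#                     upscale_blocks=[3, 3], downscale_blocks=[2, 3, 3],
#                     upsampling='transposed_conv', pooling='conv_pool',
#                     normalization='batchnorm', activation='relu',
#                     mode=tf.estimator.ModeKeys.TRAIN)
#   logits = builder(features)   # features: volumetric input tensor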
|
PyTorch/LanguageModeling/BERT/data | data | create_datasets_from_start | #!/bin/bash
# Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Download
download_wikipedia --outdir ${BERT_PREP_WORKING_DIR}/wikipedia/
python3 /workspace/bert/data/bertPrep.py --action download --dataset google_pretrained_weights # Includes vocab
python3 /workspace/bert/data/bertPrep.py --action download --dataset squad
python3 /workspace/bert/data/bertPrep.py --action download --dataset mrpc
python3 /workspace/bert/data/bertPrep.py --action download --dataset sst-2
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/training/AMP | AMP | train_benchmark_8xV100-32G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b0_cfg.py \
--mode train_and_eval \
--use_amp \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 3 \
--save_checkpoint_freq 5 \
--train_batch_size 512 \
--eval_batch_size 512 \
--lr_decay cosine \
--augmenter_name autoaugment \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
PyTorch/SpeechRecognition/QuartzNet/scripts/docker | docker | build | #!/bin/bash
docker build . --rm -t quartznet
|
PyTorch/SpeechRecognition/Jasper/configs | configs | jasper10x5dr_speedp-offline_speca | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "Jasper"
labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"]
input_val:
audio_dataset: &val_dataset
sample_rate: &sample_rate 16000
trim_silence: true
normalize_transcripts: true
filterbank_features: &val_features
normalize: per_feature
sample_rate: *sample_rate
window_size: 0.02
window_stride: 0.01
window: hann
n_filt: &n_filt 64
n_fft: 512
frame_splicing: &frame_splicing 1
dither: 0.00001
pad_align: 16
# For training we keep samples < 16.7s and apply augmentation
input_train:
audio_dataset:
<<: *val_dataset
max_duration: 16.7
ignore_offline_speed_perturbation: false
filterbank_features:
<<: *val_features
max_duration: 16.7
spec_augment:
freq_masks: 2
max_freq: 20
time_masks: 2
max_time: 75
jasper:
encoder:
init: xavier_uniform
in_feats: *n_filt
frame_splicing: *frame_splicing
activation: relu
use_conv_masks: true
blocks:
- &Conv1
filters: 256
repeat: 1
kernel_size: [11]
stride: [2]
dilation: [1]
dropout: 0.2
residual: false
- &B1
filters: 256
repeat: 5
kernel_size: [11]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B1
- &B2
filters: 384
repeat: 5
kernel_size: [13]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B2
- &B3
filters: 512
repeat: 5
kernel_size: [17]
stride: [1]
dilation: [1]
dropout: 0.2
residual: true
residual_dense: true
- *B3
- &B4
filters: 640
repeat: 5
kernel_size: [21]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B4
- &B5
filters: 768
repeat: 5
kernel_size: [25]
stride: [1]
dilation: [1]
dropout: 0.3
residual: true
residual_dense: true
- *B5
- &Conv2
filters: 896
repeat: 1
kernel_size: [29]
stride: [1]
dilation: [2]
dropout: 0.4
residual: false
- &Conv3
filters: &enc_feats 1024
repeat: 1
kernel_size: [1]
stride: [1]
dilation: [1]
dropout: 0.4
residual: false
decoder:
in_feats: *enc_feats
init: xavier_uniform
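# Note on notation: the YAML anchors (&name) and aliases (*name) above define each
# Jasper block once and repeat it (e.g. "- *B1" reuses block B1), while
# "<<: *val_dataset" merges the validation settings into the training section
# before individual keys are overridden.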
|
TensorFlow/LanguageModeling/BERT/biobert/scripts | scripts | ner_bc5cdr-chem | #!/bin/bash
echo "Container nvidia build = " $NVIDIA_BUILD_ID
init_checkpoint=${1:-"/results/biobert_tf_uncased_base/model.ckpt"}
train_batch_size=${2:-8}
learning_rate=${3:-3.125e-6}
cased=${4:-false}
precision=${5:-"fp16"}
use_xla=${6:-"true"}
num_gpu=${7:-"16"}
seq_length=${8:-128}
bert_model=${9:-"base"}
eval_batch_size=${10:-8} #Eval and Predict BS is assumed to be same
epochs=${11:-"10.0"}
if [ "$cased" = "true" ] ; then
DO_LOWER_CASE=0
CASING_DIR_PREFIX="cased"
case_flag="--do_lower_case=False"
else
DO_LOWER_CASE=1
CASING_DIR_PREFIX="uncased"
case_flag="--do_lower_case=True"
fi
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-24_H-1024_A-16
else
export BERT_DIR=/workspace/bert/data/download/google_pretrained_weights/${CASING_DIR_PREFIX}_L-12_H-768_A-12
fi
export GBS=$(expr $train_batch_size \* $num_gpu)
printf -v TAG "tf_bert_biobert_ner_bc5cdr_chem_%s_%s_gbs%d" "$bert_model" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
DATASET_DIR=/workspace/bert/data/biobert/BC5CDR/chem
OUTPUT_DIR=/results/${TAG}_${DATESTAMP}
mkdir -p ${OUTPUT_DIR}
use_fp16=""
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--amp"
else
echo "fp32/tf32 activated!"
use_fp16="--noamp"
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--use_xla"
echo "XLA activated"
else
use_xla_tag="--nouse_xla"
fi
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu -H localhost:$num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
use_hvd="--horovod"
else
mpi_command=""
use_hvd=""
fi
$mpi_command python /workspace/bert/run_ner.py \
--do_prepare=true \
--do_train=true \
--do_eval=true \
--do_predict=true \
--task_name=bc5cdr \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--num_train_epochs=$epochs \
--data_dir=$DATASET_DIR \
--output_dir=$OUTPUT_DIR \
--learning_rate=$learning_rate \
--train_batch_size=$train_batch_size \
--eval_batch_size=$eval_batch_size \
--predict_batch_size=$eval_batch_size \
--max_seq_length=$seq_length \
$use_hvd $use_fp16 $use_xla_tag $case_flag
|
Tools/PyTorch/TimeSeriesPredictionPlatform/examples | examples | hp_search_distributed | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# More info here: https://hydra.cc/docs/plugins/optuna_sweeper/
python launch_training.py \
-m \
'model.config.n_head=choice(1,2,4)' \
'trainer.optimizer.lr=tag(log, interval(1e-5, 1e-2))' \
model=tft \
dataset=electricity \
trainer/criterion=quantile \
trainer.config.batch_size=1024 \
trainer.config.num_epochs=2 \
trainer.config.log_interval=100 \
+optuna_objectives=[P50] \
hydra/sweeper=optuna \
hydra.sweeper.n_trials=4 \
hydra.sweeper.n_jobs=1 \
hydra/launcher=torchrun
|
PyTorch/SpeechSynthesis/HiFiGAN/scripts | scripts | download_cmudict | #!/usr/bin/env bash
set -e
: ${CMUDICT_DIR:="data/cmudict"}
if [ ! -f $CMUDICT_DIR/cmudict-0.7b ]; then
echo "Downloading cmudict-0.7b ..."
wget https://github.com/Alexir/CMUdict/raw/master/cmudict-0.7b -qO $CMUDICT_DIR/cmudict-0.7b
fi
|
PyTorch/LanguageModeling/Transformer-XL | Transformer-XL | getdata | # BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
echo "=== Acquiring datasets ==="
echo "---"
mkdir -p data
cd data
if [[ ! -d 'wikitext-2' ]]; then
echo "- Downloading WikiText-2 (WT2)"
wget --quiet --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip
unzip -q wikitext-2-v1.zip
cd wikitext-2
mv wiki.train.tokens train.txt
mv wiki.valid.tokens valid.txt
mv wiki.test.tokens test.txt
cd ..
fi
echo "- Downloading WikiText-103 (WT2)"
if [[ ! -d 'wikitext-103' ]]; then
wget --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip
unzip -q wikitext-103-v1.zip
cd wikitext-103
mv wiki.train.tokens train.txt
mv wiki.valid.tokens valid.txt
mv wiki.test.tokens test.txt
cd ..
fi
echo "- Downloading enwik8 (Character)"
if [[ ! -d 'enwik8' ]]; then
mkdir -p enwik8
cd enwik8
wget --continue http://mattmahoney.net/dc/enwik8.zip
wget https://raw.githubusercontent.com/salesforce/awd-lstm-lm/master/data/enwik8/prep_enwik8.py
python3 prep_enwik8.py
cd ..
fi
echo "- Downloading text8 (Character)"
if [[ ! -d 'text8' ]]; then
mkdir -p text8
cd text8
wget --continue http://mattmahoney.net/dc/text8.zip
python ../../prep_text8.py
cd ..
fi
echo "- Downloading Penn Treebank (PTB)"
if [[ ! -d 'penn' ]]; then
wget --quiet --continue http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
tar -xzf simple-examples.tgz
mkdir -p penn
cd penn
mv ../simple-examples/data/ptb.train.txt train.txt
mv ../simple-examples/data/ptb.test.txt test.txt
mv ../simple-examples/data/ptb.valid.txt valid.txt
cd ..
echo "- Downloading Penn Treebank (Character)"
mkdir -p pennchar
cd pennchar
mv ../simple-examples/data/ptb.char.train.txt train.txt
mv ../simple-examples/data/ptb.char.test.txt test.txt
mv ../simple-examples/data/ptb.char.valid.txt valid.txt
cd ..
rm -rf simple-examples/
fi
echo "- Downloading 1B words"
if [[ ! -d 'one-billion-words' ]]; then
mkdir -p one-billion-words
cd one-billion-words
wget --no-proxy http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz
tar xzvf 1-billion-word-language-modeling-benchmark-r13output.tar.gz
path="1-billion-word-language-modeling-benchmark-r13output/heldout-monolingual.tokenized.shuffled/"
cat ${path}/news.en.heldout-00000-of-00050 > valid.txt
cat ${path}/news.en.heldout-00000-of-00050 > test.txt
wget https://github.com/rafaljozefowicz/lm/raw/master/1b_word_vocab.txt
cd ..
fi
echo "---"
echo "Happy language modeling :)"
|
TensorFlow2/Recommendation/WideAndDeep/data/outbrain | outbrain | defaults | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
ONEHOT_CHANNEL = "onehot_categorical"
MULTIHOT_CHANNEL = "multihot_categorical"
NUMERICAL_CHANNEL = "numerical"
LABEL_CHANNEL = "label"
MAP_FEATURE_CHANNEL = "map"
TRAIN_MAPPING = "train"
TEST_MAPPING = "test"
PARQUET_TYPE = "parquet"
|
PyTorch/LanguageModeling/BERT/triton/large/scripts/docker | docker | build | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build -t bert . -f triton/Dockerfile
|
TensorFlow/Detection/SSD/models/research/slim/scripts | scripts | finetune_resnet_v1_50_on_flowers | #!/bin/bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script performs the following operations:
# 1. Downloads the Flowers dataset
# 2. Fine-tunes a ResNetV1-50 model on the Flowers training set.
# 3. Evaluates the model on the Flowers validation set.
#
# Usage:
# cd slim
# ./slim/scripts/finetune_resnet_v1_50_on_flowers.sh
set -e
# Where the pre-trained ResNetV1-50 checkpoint is saved to.
PRETRAINED_CHECKPOINT_DIR=/tmp/checkpoints
# Where the training (fine-tuned) checkpoint and logs will be saved to.
TRAIN_DIR=/tmp/flowers-models/resnet_v1_50
# Where the dataset is saved to.
DATASET_DIR=/tmp/flowers
# Download the pre-trained checkpoint.
if [ ! -d "$PRETRAINED_CHECKPOINT_DIR" ]; then
mkdir ${PRETRAINED_CHECKPOINT_DIR}
fi
if [ ! -f ${PRETRAINED_CHECKPOINT_DIR}/resnet_v1_50.ckpt ]; then
wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
tar -xvf resnet_v1_50_2016_08_28.tar.gz
mv resnet_v1_50.ckpt ${PRETRAINED_CHECKPOINT_DIR}/resnet_v1_50.ckpt
rm resnet_v1_50_2016_08_28.tar.gz
fi
# Download the dataset
python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir=${DATASET_DIR}
# Fine-tune only the new layers for 3000 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--model_name=resnet_v1_50 \
--checkpoint_path=${PRETRAINED_CHECKPOINT_DIR}/resnet_v1_50.ckpt \
--checkpoint_exclude_scopes=resnet_v1_50/logits \
--trainable_scopes=resnet_v1_50/logits \
--max_number_of_steps=3000 \
--batch_size=32 \
--learning_rate=0.01 \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=100 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR} \
--eval_dir=${TRAIN_DIR} \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=resnet_v1_50
# Fine-tune all the new layers for 1000 steps.
python train_image_classifier.py \
--train_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=train \
--dataset_dir=${DATASET_DIR} \
--checkpoint_path=${TRAIN_DIR} \
--model_name=resnet_v1_50 \
--max_number_of_steps=1000 \
--batch_size=32 \
--learning_rate=0.001 \
--save_interval_secs=60 \
--save_summaries_secs=60 \
--log_every_n_steps=100 \
--optimizer=rmsprop \
--weight_decay=0.00004
# Run evaluation.
python eval_image_classifier.py \
--checkpoint_path=${TRAIN_DIR}/all \
--eval_dir=${TRAIN_DIR}/all \
--dataset_name=flowers \
--dataset_split_name=validation \
--dataset_dir=${DATASET_DIR} \
--model_name=resnet_v1_50
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow | waveglow | waveGlowLoader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "waveGlowLoader.h"
#include "engineCache.h"
#include "trtUtils.h"
#include "utils.h"
#include "waveGlowBuilder.h"
#include "NvInfer.h"
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
std::shared_ptr<WaveGlowInstance> WaveGlowLoader::load(EngineCache& cache, IBuilder& builder,
std::shared_ptr<ILogger> logger, const std::string& filename, const bool fp16, const int batchSize)
{
TRTPtr<ICudaEngine> engine;
if (Utils::hasExtension(filename, ".onnx")) {
WaveGlowBuilder waveGlowBuilder(filename, logger);
engine = waveGlowBuilder.build(builder, batchSize, fp16);
// save generated engine
const std::string engFilename(filename + ".eng");
cache.save(*engine, engFilename);
}
else if (Utils::hasExtension(filename, ".eng"))
{
engine = cache.load(filename);
if (TRTUtils::getMaxBatchSize(*engine) < batchSize)
{
throw std::runtime_error(
"Engine " + filename
+ " does not support "
" the requested batch size: "
+ std::to_string(engine->getMaxBatchSize()) + " / "
+ std::to_string(batchSize)
+ ". "
"Rebuild the engine with the larger batch size.");
}
}
else
{
throw std::runtime_error("Unknown model file type: " + filename);
}
return std::make_shared<WaveGlowInstance>(std::move(engine));
}
} // namespace tts
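// A minimal usage sketch (construction of the cache, builder and logger is
// elided; they come from the surrounding TensorRT/Triton plumbing):
//
//   std::shared_ptr<tts::WaveGlowInstance> waveglow = tts::WaveGlowLoader::load(
//       cache, *builder, logger, "waveglow.onnx", /* fp16 = */ true, /* batchSize = */ 1);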
|
TensorFlow/LanguageModeling/BERT/utils | utils | create_pretraining_data | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import h5py
import tensorflow as tf
import numpy as np
from tqdm import tqdm, trange
from tokenization import BertTokenizer
import tokenization as tokenization
import random
import collections
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files, output_formats="tfrecord"):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
if 'hdf5' in output_formats:
features_hdf5 = collections.OrderedDict()
num_instances = len(instances)
features_hdf5["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features_hdf5["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features_hdf5["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
if 'tfrecord' in output_formats:
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
if 'hdf5' in output_formats:
features_hdf5["input_ids"][inst_index] = input_ids
features_hdf5["input_mask"][inst_index] = input_mask
features_hdf5["segment_ids"][inst_index] = segment_ids
features_hdf5["masked_lm_positions"][inst_index] = masked_lm_positions
features_hdf5["masked_lm_ids"][inst_index] = masked_lm_ids
features_hdf5["next_sentence_labels"][inst_index] = next_sentence_label
if 'tfrecord' not in output_formats and 'hdf5' not in output_formats:
assert False, 'Either empty output_formats list or unsupported type specified. Try: tfrecord or hdf5'
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.compat.v1.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
if 'hdf5' in output_formats:
f = h5py.File(output_file, 'w')
f.create_dataset("input_ids", data=features_hdf5["input_ids"], dtype='i4', compression='gzip')
f.create_dataset("input_mask", data=features_hdf5["input_mask"], dtype='i1', compression='gzip')
f.create_dataset("segment_ids", data=features_hdf5["segment_ids"], dtype='i1', compression='gzip')
f.create_dataset("masked_lm_positions", data=features_hdf5["masked_lm_positions"], dtype='i4', compression='gzip')
f.create_dataset("masked_lm_ids", data=features_hdf5["masked_lm_ids"], dtype='i4', compression='gzip')
f.create_dataset("next_sentence_labels", data=features_hdf5["next_sentence_labels"], dtype='i1', compression='gzip')
f.flush()
f.close()
tf.compat.v1.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
print("creating instance from {}".format(input_file))
with open(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
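# Illustrative numbers (not from any dataset): with max_seq_length=128 the token
# budget is max_num_tokens=125; roughly short_seq_prob (e.g. 10%) of examples draw
# a shorter target length uniformly from [2, 125], as computed below.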
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
# If the picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
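# Worked example (illustrative): for a 128-token sequence with masked_lm_prob=0.15
# and max_predictions_per_seq=20, num_to_predict = min(20, max(1, round(128 * 0.15)))
# = min(20, 19) = 19.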
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary the BERT model will train on.")
parser.add_argument("--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file")
parser.add_argument("--output_file",
default=None,
type=str,
required=True,
help="The output file where the model checkpoints will be written.")
## Other parameters
# int
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks).")
parser.add_argument("--max_predictions_per_seq",
default=20,
type=int,
help="Maximum sequence length.")
# floats
parser.add_argument("--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument("--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length")
parser.add_argument("--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if
(os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt'))]
else:
raise ValueError("{} is not a valid path".format(args.input_file))
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_files = args.output_file.split(",")
print("*** Writing to output files ***")
for output_file in output_files:
print(output_file)
write_instance_to_example_files(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_files)
if __name__ == "__main__":
main()
|
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit | deployment_toolkit | warmup | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import List, Optional
def warmup(
model_name: str,
batch_sizes: List[int],
triton_gpu_engine_count: int = 1,
triton_instances: int = 1,
profiling_data: str = "random",
input_shapes: Optional[List[str]] = None,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Warmup start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
measurement_window = 6 * measurement_window
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 2)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
print("\n")
print(f"==== Warmup done ====")
print("\n")
|
PyTorch/LanguageModeling/BERT/triton | triton | run_inference_on_fw | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model on framework runtime, you can use `run_inference_on_fw.py` script.
It infers data obtained from pointed data loader locally and saves received data into dump files.
Those files are stored in directory pointed by `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
import copy
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
data = copy.deepcopy(data)
writer.write(**data)
LOGGER.info("Inference finished")
if __name__ == "__main__":
main()
|
TensorFlow/LanguageModeling/BERT/triton/scripts | scripts | wait_for_triton_server | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SERVER_URI=${1:-"localhost"}
echo "Waiting for TRITON Server to be ready at http://$SERVER_URI:8000..."
live_command="curl -m 1 -L -s -o /dev/null -w %{http_code} http://$SERVER_URI:8000/v2/health/live"
ready_command="curl -m 1 -L -s -o /dev/null -w %{http_code} http://$SERVER_URI:8000/v2/health/ready"
current_status=$($live_command)
# First check the live endpoint. If that passes, check the ready endpoint. If either fails, keep looping.
while [[ ${current_status} != "200" ]] || [[ $($ready_command) != "200" ]]; do
printf "."
sleep 1
current_status=$($live_command)
done
echo "TRITON Server is ready!"
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/layers | layers | score_predictor | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Predictor layer
"""
class ScorePredictor(nn.Module):
def __init__(self, input_dim, output_dim, L=2):
super().__init__()
list_FC_layers = [
nn.Linear(
input_dim // 2 ** l, input_dim // 2 ** (l + 1), bias=True
)
for l in range(L)
]
list_FC_layers.append(
nn.Linear(input_dim // 2 ** L, output_dim, bias=True)
)
self.FC_layers = nn.ModuleList(list_FC_layers)
self.L = L
def forward(self, x):
y = x
for l in range(self.L):
y = self.FC_layers[l](y)
y = F.relu(y)
y = self.FC_layers[self.L](y)
return y
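# Usage sketch (hypothetical sizes, for illustration only): ScorePredictor(input_dim=128,
# output_dim=1, L=2) applies Linear layers 128->64 and 64->32 with ReLU after each,
# followed by a final 32->1 projection with no activation, e.g.:
#   predictor = ScorePredictor(input_dim=128, output_dim=1, L=2)
#   scores = predictor(torch.randn(256, 128))  # -> tensor of shape (256, 1)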
|
TensorFlow/Detection/SSD | SSD | .gitignore | **/*.swp
|
PyTorch/LanguageModeling/BERT | BERT | bind | # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /bin/bash
set -euo pipefail
print_usage() {
cat << EOF
${0} [options] [--] COMMAND [ARG...]
Control binding policy for each task. Assumes one rank will be launched for each GPU.
Options:
--cpu=MODE
* exclusive -- bind each rank to an exclusive set of cores near its GPU
* exclusive,nosmt -- bind each rank to an exclusive set of cores near its GPU, without hyperthreading
* node -- bind each rank to all cores in the NUMA node nearest its GPU [default]
* *.sh -- bind each rank using the bash associative array bind_cpu_cores or bind_cpu_nodes from a file
* off -- don't bind
--mem=MODE
* node -- bind each rank to the nearest NUMA node [default]
* *.sh -- bind each rank using the bash associative array bind_mem from a file
* off -- don't bind
--ib=MODE
* single -- bind each rank to a single IB device near its GPU
  * off -- don't bind [default]
--cluster=CLUSTER
Select which cluster is being used. May be required if system params cannot be detected.
EOF
}
################################################################################
# Argument parsing
################################################################################
cpu_mode='node'
mem_mode='node'
ib_mode='off'
cluster=''
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) print_usage ; exit 0 ;;
--cpu=*) cpu_mode="${1/*=/}"; shift ;;
--cpu) cpu_mode="$2"; shift 2 ;;
--mem=*) mem_mode="${1/*=/}"; shift ;;
--mem) mem_mode="$2"; shift 2 ;;
--ib=*) ib_mode="${1/*=/}"; shift ;;
--ib) ib_mode="$2"; shift 2 ;;
--cluster=*) cluster="${1/*=/}"; shift ;;
--cluster) cluster="$2"; shift 2 ;;
--) shift; break ;;
*) break ;;
esac
done
if [ $# -lt 1 ]; then
    echo 'ERROR: no command given' >&2
print_usage
exit 1
fi
################################################################################
# Get system params
################################################################################
# LOCAL_RANK is set with an enroot hook for Pytorch containers
# SLURM_LOCALID is set by Slurm
# OMPI_COMM_WORLD_LOCAL_RANK is set by mpirun
readonly local_rank="${LOCAL_RANK:=${SLURM_LOCALID:=${OMPI_COMM_WORLD_LOCAL_RANK:-}}}"
if [ -z "${local_rank}" ]; then
echo 'ERROR: cannot read LOCAL_RANK from env' >&2
exit 1
fi
num_gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader,nounits)
if [ "${local_rank}" -ge "${num_gpus}" ]; then
echo "ERROR: local rank is ${local_rank}, but there are only ${num_gpus} gpus available" >&2
exit 1
fi
get_lscpu_value() {
awk -F: "(\$1 == \"${1}\"){gsub(/ /, \"\", \$2); print \$2; found=1} END{exit found!=1}"
}
lscpu_out=$(lscpu)
num_sockets=$(get_lscpu_value 'Socket(s)' <<< "${lscpu_out}")
num_nodes=$(get_lscpu_value 'NUMA node(s)' <<< "${lscpu_out}")
cores_per_socket=$(get_lscpu_value 'Core(s) per socket' <<< "${lscpu_out}")
echo "num_sockets = ${num_sockets} num_nodes=${num_nodes} cores_per_socket=${cores_per_socket}"
readonly cores_per_node=$(( (num_sockets * cores_per_socket) / num_nodes ))
if [ ${num_gpus} -gt 1 ]; then
readonly gpus_per_node=$(( num_gpus / num_nodes ))
else
readonly gpus_per_node=1
fi
readonly cores_per_gpu=$(( cores_per_node / gpus_per_node ))
readonly local_node=$(( local_rank / gpus_per_node ))
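# Worked example (hypothetical system, for illustration only): with 2 sockets x 20 cores,
# 2 NUMA nodes and 8 GPUs:
#   cores_per_node = (2 * 20) / 2 = 20, gpus_per_node = 8 / 2 = 4, cores_per_gpu = 20 / 4 = 5,
#   and local rank 5 maps to local_node = 5 / 4 = 1.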
declare -a ibdevs=()
case "${cluster}" in
circe)
# Need to specialize for circe because IB detection is hard
ibdevs=(mlx5_1 mlx5_2 mlx5_3 mlx5_4 mlx5_7 mlx5_8 mlx5_9 mlx5_10)
;;
selene)
# Need to specialize for selene because IB detection is hard
ibdevs=(mlx5_0 mlx5_1 mlx5_2 mlx5_3 mlx5_6 mlx5_7 mlx5_8 mlx5_9)
;;
'')
if ibstat_out="$(ibstat -l 2>/dev/null | sort -V)" ; then
mapfile -t ibdevs <<< "${ibstat_out}"
fi
;;
*)
echo "ERROR: Unknown cluster '${cluster}'" >&2
exit 1
;;
esac
readonly num_ibdevs="${#ibdevs[@]}"
################################################################################
# Setup for exec
################################################################################
declare -a numactl_args=()
case "${cpu_mode}" in
exclusive)
numactl_args+=( "$(printf -- "--physcpubind=%u-%u,%u-%u" \
$(( local_rank * cores_per_gpu )) \
$(( (local_rank + 1) * cores_per_gpu - 1 )) \
$(( local_rank * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) )) \
$(( (local_rank + 1) * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) - 1 )) \
)" )
;;
exclusive,nosmt)
numactl_args+=( "$(printf -- "--physcpubind=%u-%u" \
$(( local_rank * cores_per_gpu )) \
$(( (local_rank + 1) * cores_per_gpu - 1 )) \
)" )
;;
node)
numactl_args+=( "--cpunodebind=${local_node}" )
;;
*.sh)
source "${cpu_mode}"
if [ -n "${bind_cpu_cores:-}" ]; then
numactl_args+=( "--physcpubind=${bind_cpu_cores[${local_rank}]}" )
elif [ -n "${bind_cpu_nodes:-}" ]; then
numactl_args+=( "--cpunodebind=${bind_cpu_nodes[${local_rank}]}" )
else
echo "ERROR: invalid CPU affinity file ${cpu_mode}." >&2
exit 1
fi
;;
off|'')
;;
*)
        echo "ERROR: invalid cpu mode '${cpu_mode}'" >&2
print_usage
exit 1
;;
esac
case "${mem_mode}" in
node)
numactl_args+=( "--membind=${local_node}" )
;;
*.sh)
source "${mem_mode}"
if [ -z "${bind_mem:-}" ]; then
echo "ERROR: invalid memory affinity file ${mem_mode}." >&2
exit 1
fi
numactl_args+=( "--membind=${bind_mem[${local_rank}]}" )
;;
off|'')
;;
*)
        echo "ERROR: invalid mem mode '${mem_mode}'" >&2
print_usage
exit 1
;;
esac
case "${ib_mode}" in
single)
if [ "${num_ibdevs}" -eq 0 ]; then
            echo "WARNING: used '$0 --ib=single', but there are 0 IB devices available; skipping IB binding." >&2
else
readonly ibdev="${ibdevs[$(( local_rank * num_ibdevs / num_gpus ))]}"
export OMPI_MCA_btl_openib_if_include="${OMPI_MCA_btl_openib_if_include-$ibdev}"
fi
;;
off|'')
;;
*)
        echo "ERROR: invalid ib mode '${ib_mode}'" >&2
print_usage
exit 1
;;
esac
################################################################################
# Exec
################################################################################
if [ "${#numactl_args[@]}" -gt 0 ] ; then
set -x
exec numactl "${numactl_args[@]}" -- "${@}"
else
exec "${@}"
fi
|
PyTorch/SpeechRecognition/QuartzNet/quartznet | quartznet | model | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
activations = {
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
}
def init_weights(m, mode='xavier_uniform'):
if type(m) == nn.Conv1d or type(m) == MaskedConv1d:
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif type(m) == nn.BatchNorm1d:
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def compute_new_kernel_size(kernel_size, kernel_width):
new_kernel_size = max(int(kernel_size * kernel_width), 1)
# If kernel is even shape, round up to make it odd
if new_kernel_size % 2 == 0:
new_kernel_size += 1
return new_kernel_size
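# Example (illustration only): compute_new_kernel_size(11, 0.25) = max(int(2.75), 1) = 2,
# which is even and therefore rounded up to 3; compute_new_kernel_size(11, 0.5) yields 5,
# which is already odd and is kept as-is.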
def get_same_padding(kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (kernel_size // 2) * dilation
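# Example (illustration only): get_same_padding(11, stride=1, dilation=1) = 5, so a
# stride-1 convolution with kernel size 11 leaves the sequence length unchanged.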
class GroupShuffle(nn.Module):
def __init__(self, groups, channels):
super(GroupShuffle, self).__init__()
self.groups = groups
self.channels_per_group = channels // groups
def forward(self, x):
sh = x.shape
x = x.view(-1, self.groups, self.channels_per_group, sh[-1])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.groups * self.channels_per_group, sh[-1])
return x
class MaskedConv1d(nn.Conv1d):
"""1D convolution with sequence masking
"""
__constants__ = ["masked"]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, use_mask=True,
heads=-1):
# Jasper refactor compat
assert heads == -1 # Unsupported
masked = use_mask
super(MaskedConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.masked = masked
def get_seq_len(self, lens):
pad, ks = self.padding[0], self.kernel_size[0]
return torch.div(lens + 2 * pad - self.dilation[0] * (ks - 1) - 1,
self.stride[0], rounding_mode='trunc') + 1
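    # get_seq_len implements PyTorch's Conv1d output-length formula:
    #   L_out = floor((L_in + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1
    # e.g. (illustration only) L_in=100, kernel_size=11, padding=5, stride=2, dilation=1 -> L_out=50.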
def forward(self, x, x_lens=None):
if self.masked:
max_len = x.size(2)
idxs = torch.arange(max_len, dtype=x_lens.dtype, device=x.device)
mask = idxs.expand(x_lens.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)
x_lens = self.get_seq_len(x_lens)
return super(MaskedConv1d, self).forward(x), x_lens
class JasperBlock(nn.Module):
__constants__ = ["conv_mask", "separable", "res", "mconv"]
def __init__(self, infilters, filters, repeat=3, kernel_size=11,
kernel_size_factor=1, stride=1, dilation=1, padding='same',
dropout=0.2, activation=None, residual=True, groups=1,
separable=False, heads=-1, normalization="batch",
norm_groups=1, residual_panes=[], use_conv_masks=False):
super(JasperBlock, self).__init__()
# Fix params being passed as list, but default to ints
wrap = lambda v: [v] if type(v) is int else v
kernel_size = wrap(kernel_size)
dilation = wrap(dilation)
padding = wrap(padding)
stride = wrap(stride)
if padding != "same":
raise ValueError("currently only 'same' padding is supported")
kernel_size_factor = float(kernel_size_factor)
if type(kernel_size) in (list, tuple):
kernel_size = [compute_new_kernel_size(k, kernel_size_factor)
for k in kernel_size]
else:
kernel_size = compute_new_kernel_size(kernel_size,
kernel_size_factor)
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
self.conv_mask = use_conv_masks
self.separable = separable
infilters_loop = infilters
conv = nn.ModuleList()
for _ in range(repeat - 1):
conv.extend(
self._get_conv_bn_layer(
infilters_loop, filters, kernel_size=kernel_size,
stride=stride, dilation=dilation, padding=padding_val,
groups=groups, heads=heads, separable=separable,
normalization=normalization, norm_groups=norm_groups)
)
conv.extend(self._get_act_dropout_layer(drop_prob=dropout,
activation=activation))
infilters_loop = filters
conv.extend(
self._get_conv_bn_layer(
infilters_loop, filters, kernel_size=kernel_size, stride=stride,
dilation=dilation, padding=padding_val, groups=groups,
heads=heads, separable=separable, normalization=normalization,
norm_groups=norm_groups)
)
self.mconv = conv
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
res_list = nn.ModuleList()
if len(residual_panes) == 0:
res_panes = [infilters]
self.dense_residual = False
for ip in res_panes:
res_list.append(nn.ModuleList(
self._get_conv_bn_layer(ip, filters, kernel_size=1,
normalization=normalization,
norm_groups=norm_groups, stride=[1])
))
self.res = res_list
else:
self.res = None
self.mout = nn.Sequential(*self._get_act_dropout_layer(
drop_prob=dropout, activation=activation))
def _get_conv(self, in_channels, out_channels, kernel_size=11, stride=1,
dilation=1, padding=0, bias=False, groups=1, heads=-1,
separable=False):
kw = {'in_channels': in_channels, 'out_channels': out_channels,
'kernel_size': kernel_size, 'stride': stride, 'dilation': dilation,
'padding': padding, 'bias': bias, 'groups': groups}
if self.conv_mask:
return MaskedConv1d(**kw, heads=heads, use_mask=self.conv_mask)
else:
return nn.Conv1d(**kw)
def _get_conv_bn_layer(self, in_channels, out_channels, kernel_size=11,
stride=1, dilation=1, padding=0, bias=False,
groups=1, heads=-1, separable=False,
normalization="batch", norm_groups=1):
if norm_groups == -1:
norm_groups = out_channels
if separable:
layers = [
self._get_conv(in_channels, in_channels, kernel_size,
stride=stride, dilation=dilation, padding=padding,
bias=bias, groups=in_channels, heads=heads),
self._get_conv(in_channels, out_channels, kernel_size=1,
stride=1, dilation=1, padding=0, bias=bias,
groups=groups),
]
else:
layers = [
self._get_conv(in_channels, out_channels, kernel_size,
stride=stride, dilation=dilation,
padding=padding, bias=bias, groups=groups)
]
if normalization == "group":
layers.append(nn.GroupNorm(num_groups=norm_groups,
num_channels=out_channels))
elif normalization == "instance":
layers.append(nn.GroupNorm(num_groups=out_channels,
num_channels=out_channels))
elif normalization == "layer":
layers.append(nn.GroupNorm(num_groups=1, num_channels=out_channels))
elif normalization == "batch":
layers.append(nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1))
else:
raise ValueError(
f"Normalization method ({normalization}) does not match"
f" one of [batch, layer, group, instance]."
)
if groups > 1:
layers.append(GroupShuffle(groups, out_channels))
return layers
def _get_act_dropout_layer(self, drop_prob=0.2, activation=None):
if activation is None:
activation = nn.Hardtanh(min_val=0.0, max_val=20.0)
layers = [activation, nn.Dropout(p=drop_prob)]
return layers
def forward(self, xs, xs_lens=None):
if not self.conv_mask:
xs_lens = 0
# compute forward convolutions
out = xs[-1]
lens = xs_lens
for i, l in enumerate(self.mconv):
# if we're doing masked convolutions, we need to pass in and
# possibly update the sequence lengths
# if (i % 4) == 0 and self.conv_mask:
if isinstance(l, MaskedConv1d):
out, lens = l(out, lens)
else:
out = l(out)
# compute the residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if isinstance(res_layer, MaskedConv1d):
res_out, _ = res_layer(res_out, xs_lens)
else:
res_out = res_layer(res_out)
out = out + res_out
# compute the output
out = self.mout(out)
if self.res is not None and self.dense_residual:
out = xs + [out]
else:
out = [out]
return (out, lens) if self.conv_mask else (out, None)
class JasperEncoder(nn.Module):
__constants__ = ["use_conv_masks"]
def __init__(self, in_feats, activation, frame_splicing=1,
init='xavier_uniform', use_conv_masks=False, blocks=[]):
super(JasperEncoder, self).__init__()
self.use_conv_masks = use_conv_masks
self.layers = nn.ModuleList()
in_feats *= frame_splicing
all_residual_panes = []
for i, blk in enumerate(blocks):
blk['activation'] = activations[activation]()
has_residual_dense = blk.pop('residual_dense', False)
if has_residual_dense:
all_residual_panes += [in_feats]
blk['residual_panes'] = all_residual_panes
else:
blk['residual_panes'] = []
self.layers.append(
JasperBlock(in_feats, use_conv_masks=use_conv_masks, **blk))
in_feats = blk['filters']
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, x, x_lens=None):
out, out_lens = [x], x_lens
for layer in self.layers:
out, out_lens = layer(out, out_lens)
return out, out_lens
class JasperDecoderForCTC(nn.Module):
def __init__(self, in_feats, n_classes, init='xavier_uniform'):
super(JasperDecoderForCTC, self).__init__()
self.layers = nn.Sequential(
nn.Conv1d(in_feats, n_classes, kernel_size=1, bias=True),)
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, enc_out):
out = self.layers(enc_out[-1]).transpose(1, 2)
return F.log_softmax(out, dim=2)
class GreedyCTCDecoder(nn.Module):
@torch.no_grad()
def forward(self, log_probs):
return log_probs.argmax(dim=-1, keepdim=False).int()
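# Note: GreedyCTCDecoder maps log_probs of shape (batch, time, n_classes) to integer label
# indices of shape (batch, time) by taking an argmax over the class dimension.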
class QuartzNet(nn.Module):
def __init__(self, encoder_kw, decoder_kw, transpose_in=False):
super(QuartzNet, self).__init__()
self.transpose_in = transpose_in
self.encoder = JasperEncoder(**encoder_kw)
self.decoder = JasperDecoderForCTC(**decoder_kw)
def forward(self, x, x_lens=None):
if self.encoder.use_conv_masks:
assert x_lens is not None
enc, enc_lens = self.encoder(x, x_lens)
out = self.decoder(enc)
return out, enc_lens
else:
if self.transpose_in:
x = x.transpose(1, 2)
enc, _ = self.encoder(x)
out = self.decoder(enc)
return out # XXX torchscript refuses to output None
# TODO Explicitly add x_lens=None for inference (now x can be a Tensor or tuple)
def infer(self, x):
if self.encoder.use_conv_masks:
return self.forward(x)
else:
ret = self.forward(x[0])
return ret, len(ret)
class CTCLossNM:
def __init__(self, n_classes):
self._criterion = nn.CTCLoss(blank=n_classes-1, reduction='none')
def __call__(self, log_probs, targets, input_length, target_length):
input_length = input_length.long()
target_length = target_length.long()
targets = targets.long()
loss = self._criterion(log_probs.transpose(1, 0), targets,
input_length, target_length)
# note that this is different from reduction = 'mean'
# because we are not dividing by target lengths
return torch.mean(loss)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/containers | containers | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer
|
PyTorch/LanguageModeling/BERT/lamb_amp_opt/csrc | csrc | frontend | #include <torch/extension.h>
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python);
void multi_tensor_lamb_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor lr,
const float beta1,
const float beta2,
const float epsilon,
const at::Tensor step,
const int bias_correction,
const float weight_decay,
const int grad_averaging,
const int mode,
at::Tensor global_grad_norm,
at::Tensor max_grad_norm,
at::optional<bool> use_nvlamb_python,
at::Tensor found_inf,
at::Tensor inv_scale);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("multi_tensor_l2norm", &multi_tensor_l2norm_cuda,
"Computes L2 norm for a list of contiguous tensors");
m.def("multi_tensor_lamb", &multi_tensor_lamb_cuda,
"Computes and apply update for LAMB optimizer");
}
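// Illustrative Python-side usage sketch (the module name below is an assumption -- it is
// whatever TORCH_EXTENSION_NAME resolves to when this extension is built):
//   import fused_lamb_cuda
//   norm, per_tensor_norms = fused_lamb_cuda.multi_tensor_l2norm(
//       chunk_size, noop_flag, [list_of_param_tensors], True)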
|
PyTorch/Forecasting/TFT/triton/runner | runner | finalizer | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import shutil
from typing import Dict, List
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .experiment import ExperimentResult
from .logger import LOGGER
from .stages import ResultsType
from .summary import load_results, save_summary
from .task import Task
class Finalizer(abc.ABC):
@abc.abstractmethod
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
pass
class ExperimentFinalizer(Finalizer):
"""
Public runner finalizer object.
"""
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
results_path = workspace / task.results_dir
self._generate_summary(results_path, results)
self._finalize_task(results_path, task)
def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None:
"""
Finalize task information
Args:
task: Task object
Returns:
None
"""
task.end()
file_path = results_path / task.filename
LOGGER.debug(f"Saving task details to file {file_path}")
task.to_file(file_path)
LOGGER.debug("Done")
LOGGER.info(f"Task details and results stored in {results_path}")
def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]):
"""
Generate summary for results collected in all experiments
Args:
results_path: Path where results should be stored
experiment_results: Results collected from experiments
Returns:
"""
performance_offline_results = list()
performance_online_results = list()
results_mapping = {
ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results,
ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results,
}
self._collect_summary_results(experiment_results, results_mapping)
self._prepare_final_results(results_path, results_mapping)
def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict):
for experiment_result in experiment_results:
experiment = experiment_result.experiment
for result_type, result_path in experiment_result.results.items():
if not result_path.is_file() and not result_path.is_dir():
raise FileNotFoundError(f"Expected file {result_path} not found")
LOGGER.debug(f"Found {result_type} in {result_path} file.")
if result_type not in results_mapping:
LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.")
                    continue
LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary")
result = load_results(
results_path=result_path,
parameters=experiment.parameters,
result_type=result_type,
)
results_mapping[result_type].extend(result)
LOGGER.debug(f"Done.")
def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None:
"""
Prepare summary files for offline and online performance
Args:
results_path: Path where results should be stored
results_mapping: Mapping with results type and collected results for given stage
Returns:
None
"""
for results_type, results in results_mapping.items():
save_summary(
result_type=results_type,
results=results,
summary_dir=results_path,
)
|
PyTorch/SpeechSynthesis/FastPitch/phrases | phrases | phrase_4_64 | She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | exp_utils | # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import os
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
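# Usage sketch (made-up values, for illustration only): with AverageMeter(warmup=2), the
# first two updates only set `val`; two further calls update(8) and update(9) then give
# sum=17, count=2 and avg=8.5, i.e. the warmup iterations are excluded from the average.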
def setup_dllogger(enabled=True, filename=os.devnull, rank=0):
if enabled and rank == 0:
backends = [
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
filename,
),
]
dllogger.init(backends)
else:
dllogger.init([])
dllogger.metadata("infer_throughput", {"unit": "images/s"})
dllogger.metadata("train_throughput", {"unit": "images/s"})
|
TensorFlow/Segmentation/UNet_Industrial/notebooks | notebooks | Colab_UNet_Industrial_TF_TFHub_export | #!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_export.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[ ]:
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # UNet Industrial Demo on TensorFlow Hub: Export and Inference
# ## Overview
#
#
# In this notebook, we will demo the process of exporting NVIDIA NGC [UNet Industrial defect detection models](https://ngc.nvidia.com/catalog/model-scripts/nvidia:unet_industrial_for_tensorflow) to TF-Hub modules, which can be persisted to disk, saved to a Google Drive folder, or published to TF-Hub. The NVIDIA pre-trained U-Net model is adapted from the original version of the [U-Net model](https://arxiv.org/abs/1505.04597), which is
# a convolutional auto-encoder for 2D image segmentation. U-Net was first introduced by
# Olaf Ronneberger, Philipp Fischer, and Thomas Brox in the paper:
# [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597).
#
# [NVIDIA NGC](https://ngc.nvidia.com/catalog/models) is the hub for GPU-optimized software and pre-trained models for deep learning, machine learning, and HPC that takes care of all the plumbing so data scientists, developers, and researchers can focus on building solutions, gathering insights, and delivering business value.
#
# [TensorFlow Hub](https://www.tensorflow.org/hub) is "a library for the publication, discovery, and consumption of reusable parts of machine learning models. A module is a self-contained piece of a TensorFlow graph, along with its weights and assets, that can be reused across different tasks in a process known as transfer learning."
#
#
#
# ### Requirement
# 1. Before running this notebook, please set the Colab runtime environment to GPU via the menu *Runtime => Change runtime type => GPU*.
#
#
# In[1]:
get_ipython().system('nvidia-smi')
# The below code checks whether a Tensor-Core GPU is present. Tensor Cores can accelerate large matrix operations by performing mixed-precision matrix multiply and accumulate calculations in a single operation.
# In[2]:
get_ipython().run_line_magic('tensorflow_version', '1.x')
import tensorflow as tf
print(tf.__version__) # This notebook runs on TensorFlow 1.x.
from tensorflow.python.client import device_lib
def check_tensor_core_gpu_present():
    local_device_protos = device_lib.list_local_devices()
    for line in local_device_protos:
        if "compute capability" in str(line):
            compute_capability = float(line.physical_device_desc.split("compute capability: ")[-1])
            if compute_capability >= 7.0:
                return True
    return False
print("Tensor Core GPU Present:", check_tensor_core_gpu_present())
tensor_core_gpu = check_tensor_core_gpu_present()
# 2. Next, we clone the NVIDIA Github UNet Industrial repository and set up the workspace.
# In[3]:
get_ipython().system('git clone https://github.com/NVIDIA/DeepLearningExamples')
# In[4]:
get_ipython().run_cell_magic('bash', '', 'cd DeepLearningExamples\ngit checkout master\n')
# In[5]:
import os
WORKSPACE_DIR='/content/DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/notebooks'
os.chdir(WORKSPACE_DIR)
print (os.getcwd())
# In[6]:
get_ipython().system('pip install tensorflow_hub==0.6.0')
# ## Data download
#
# We will first download some data for testing, in particular, the [Weakly Supervised Learning for Industrial Optical Inspection (DAGM 2007)](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html) competition dataset.
#
# > The competition is inspired by problems from industrial image processing. In order to satisfy their customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly.
# >
# > The competitors have to design a stand-alone algorithm which is able to detect miscellaneous defects on various background textures.
# >
# > The particular challenge of this contest is that the algorithm must learn, without human intervention, to discern defects automatically from a weakly labeled (i.e., labels are not exact to the pixel level) training set, the exact characteristics of which are unknown at development time. During the competition, the programs have to be trained on new data without any human guidance.
#
# **Source:** https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html
#
# In[ ]:
get_ipython().system(' ./download_and_preprocess_dagm2007_public.sh ./data')
# The final data directory should look like:
#
# ```
# ./data
# raw_images
# public
# Class1
# Class2
# Class3
# Class4
# Class5
# Class6
# Class1_def
# Class2_def
# Class3_def
# Class4_def
# Class5_def
# Class6_def
# private
# zip_files
# ```
# Each data directory contains training images corresponding to one of 6 types of defects.
# ## Model download from NVIDIA NGC model repository
#
# NVIDIA provides pretrained UNet models along with many other deep learning models such as ResNet, BERT, Transformer, SSD... at https://ngc.nvidia.com/catalog/models. Here, we will download and unzip pretrained UNet models corresponding to the 10 classes of the DAGM 2007 defect detection dataset.
# In[ ]:
get_ipython().run_cell_magic('bash', '', 'rm unet_model.zip\nwget -nc -q --show-progress -O unet_model.zip \\\n"https://api.ngc.nvidia.com/v2/models/nvidia/unetindustrial_for_tensorflow_32/versions/1/zip"\nunzip -o ./unet_model.zip\n')
# Upon completion of the download, the following model directories should exist, containing pre-trained models corresponding to the 10 classes of the DAGM 2007 competition data set.
# In[10]:
get_ipython().system('ls JoC_UNET_Industrial_FP32_TF_20190522')
# ## Inference with Native TensorFlow
#
# We will now launch an interactive session to verify the correctness of the pretrained models, where you can load new test images. First, we load some required libraries and define some helper functions to load the pretrained UNet models.
# In[11]:
get_ipython().system('pip install ../dllogger')
# In[ ]:
import dllogger
from dllogger.logger import LOGGER
# In[12]:
try:
__import__("horovod")
except ImportError:
os.system("pip install horovod")
import horovod.tensorflow
import sys
sys.path.insert(0,'/content/DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial')
from model.unet import UNet_v1
# In[ ]:
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# We will now load and inspect one defect image from Class 1.
# In[14]:
img = mpimg.imread('./data/raw_images/public/Class1_def/1.png')
plt.figure(figsize = (10,10));
plt.imshow(img, cmap='gray');
# As we can see in this figure, there exists a defective area in the top left corner. We will now load the model and carry out inference on the normalized test image.
# In[ ]:
# Image preprocessing
img = np.expand_dims(img, axis=2)
img = np.expand_dims(img, axis=0)
img = (img-0.5)/0.5
# In[ ]:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
graph = tf.Graph()
with graph.as_default():
with tf.Session(config=config) as sess:
network = UNet_v1(
model_name="UNet_v1",
input_format='NHWC',
compute_format='NHWC',
n_output_channels=1,
unet_variant='tinyUNet',
weight_init_method='he_uniform',
activation_fn='relu'
)
tf_input = tf.placeholder(tf.float32, [None, 512, 512, 1], name='input')
outputs, logits = network.build_model(tf_input)
saver = tf.train.Saver()
# Restore variables from disk.
saver.restore(sess, "JoC_UNET_Industrial_FP32_TF_20190522/Class+1/model.ckpt-2500")
output = sess.run([outputs, logits], feed_dict={tf_input: img})
# In[21]:
# Print out model predicted mask
plt.figure(figsize = (10,10))
plt.imshow(np.squeeze(output[0]), cmap='gray')
# As expected, the model points out the correct defective area in this image. Please feel free to try out other defective images for Class 1 within `./data/raw_images/public/Class1_def/`, or load the model and test data for other classes from 1 to 10.
# In[22]:
get_ipython().system('ls ./data/raw_images/public/Class1_def/')
# ## Export UNet models to TF-Hub
#
# We will now export the 10 pretrained UNet models into TensorFlow hub modules.
# In[ ]:
def module_fn(is_training=False):
"""A module_fn for use with hub.create_module_spec().
Args:
is_training: a boolean meant to control whether batch norm, dropout etc.
are built in training or inference mode for this graph version (TODO)
"""
# Set up the module input
with tf.name_scope('hub_input'):
tf_input = tf.placeholder(tf.float32, [None, 512, 512, 1], name='input')
# Build the net.
network = UNet_v1(
model_name="UNet_v1",
input_format='NHWC',
compute_format='NHWC',
n_output_channels=1,
unet_variant='tinyUNet',
weight_init_method='he_uniform',
activation_fn='relu'
)
outputs, logits = network.build_model(tf_input)
# Add the default signature.
hub.add_signature('default', dict(images=tf_input), dict(default=outputs))
# In[ ]:
import tensorflow_hub as hub
tags_and_args = [
# The default graph is built with batch_norm, dropout etc. in inference
# mode. This graph version is good for inference, not training.
([], {
'is_training': False
}),
# A separate 'train' graph builds batch_norm, dropout etc. in training
# mode.
(['train'], {
'is_training': True # TODO
}),
]
drop_collections = [
'moving_vars', tf.GraphKeys.GLOBAL_STEP,
tf.GraphKeys.MOVING_AVERAGE_VARIABLES
]
spec = hub.create_module_spec(module_fn, tags_and_args, drop_collections)
# In[25]:
for class_id in range (1, 11):
with tf.Graph().as_default():
module = hub.Module(spec)
variables_to_restore = module.variable_map
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(model_path="JoC_UNET_Industrial_FP32_TF_20190522/Class+%d/model.ckpt-2500"%class_id,
var_list=variables_to_restore)
with tf.Session() as session:
init_fn(session)
module.export("./NVIDIA/Unet/Class_%d/"%class_id, session=session)
# ## Save TF-Hub modules to Google Drive (Optional)
#
# In this step we will persist the created TF-Hub modules to Google Drive. Execute the below cell to authorize Colab to access your Google Drive content, then copy the created UNet TF-Hub modules to Google Drive.
# In[ ]:
from google.colab import drive
drive.mount('/content/gdrive')
# In[ ]:
get_ipython().system('cp -r "./NVIDIA" "/content/gdrive/My Drive/"')
# ## Inference with TF-Hub modules
#
# Next, we will load and do inference with the created TensorFlow Hub modules. In order to load TF Hub modules, there are several options:
#
# - Load from a local cache or directory
#
# - Load from a remote repository
#
#
# In[ ]:
import tensorflow_hub as hub
# Loading from a local cache/directory
#module = hub.Module("NVIDIA/Unet/Class_1", trainable=False)
# Loading from a remote repository. The 10 NVIDIA UNet TF-Hub modules are available at
# https://tfhub.dev/nvidia/unet/industrial/class_1/1 (similarly for class 2, 3 ...) and
# https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_{1..10}
module = hub.Module("https://tfhub.dev/nvidia/unet/industrial/class_1/1") # or class_2, class_3 etc...
#module = hub.Module("https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_1/1.tar.gz") # or class_2, class_3 etc...
# In[27]:
print(module.get_signature_names())
# In[28]:
print(module.get_input_info_dict()) # When no signature is given, considers it as 'default'
# In[29]:
print(module.get_output_info_dict())
# In[ ]:
# Load a test image
img = mpimg.imread('./data/raw_images/public/Class1_def/1.png')
# Image preprocessing
img = np.expand_dims(img, axis=2)
img = np.expand_dims(img, axis=0)
img = (img-0.5)/0.5
# In[31]:
with tf.Session() as sess:
output = module(img)
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
pred = sess.run(output)
# In[32]:
# Print out model predicted mask
plt.figure(figsize = (10,10))
plt.imshow(np.squeeze(pred), cmap='gray')
# # Conclusion
#
# In this notebook, we have walked through the complete process of creating TF-Hub modules from pretrained UNet-Industrial models and then tested the correctness of the created TF-Hub modules.
# ## What's next
# Now it's time to try the UNet-Industrial TensorFlow Hub modules on your own data.
# In[ ]:
|
TensorFlow2/LanguageModeling/BERT/official/utils/export | export | export | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions for exporting models as SavedModels or other types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32,
                                           batch_size=1):
  """Returns an input_receiver_fn that can be used during serving.
This expects examples to come through as float tensors, and simply
wraps them as TensorServingInputReceivers.
Arguably, this should live in tf.estimator.export. Testing here first.
Args:
shape: list representing target size of a single example.
dtype: the expected datatype for the input example
batch_size: number of input tensors that will be passed for prediction
Returns:
A function that itself returns a TensorServingInputReceiver.
"""
def serving_input_receiver_fn():
# Prep a placeholder where the input example will be fed in
features = tf.compat.v1.placeholder(
dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(
features=features, receiver_tensors=features)
return serving_input_receiver_fn
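# Usage sketch (hypothetical estimator and shape, for illustration only): the returned
# function can be passed to an Estimator export call, e.g.
#   receiver_fn = build_tensor_serving_input_receiver_fn(shape=[224, 224, 3], batch_size=1)
#   estimator.export_saved_model('/tmp/exported_model', receiver_fn)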
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin | taco2ModulationRemovalPlugin | taco2ModulationRemovalKernel | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_MODULATIONREMOVALKERNEL_H
#define TT2I_MODULATIONREMOVALKERNEL_H
#include "cuda_runtime.h"
namespace nvinfer1
{
namespace plugin
{
class Taco2ModulationRemovalKernel
{
public:
/**
* @brief Remove modulation effects from the input.
*
* @param batchSize The number of items in the batch.
* @param weightsDevice The weights on the device.
* @param inputDevice The input on the device.
* @param outputDevice The output on the device.
* @param inputLength The length of the input.
* @param filterLength The length of filters.
* @param hopLength The hop length.
     * @param stream The stream to operate on.
*/
static void compute(const int batchSize, const float* weightsDevice, const float* inputDevice, float* outputDevice,
int inputLength, int filterLength, int hopLength, cudaStream_t stream);
};
} // namespace plugin
} // namespace nvinfer1
#endif
|
TensorFlow/Recommendation/WideAndDeep/scripts | scripts | DGX1_benchmark_training_fp32_1gpu | #!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
set -e
python -m trainer.task \
--benchmark_warmup_steps 500 \
--benchmark_steps 1000 \
--gpu \
--benchmark
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs | feature_specs | 13_num_10_cat | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100001
dtype: int32
cat_2.bin:
cardinality: 100002
dtype: int32
cat_3.bin:
cardinality: 100003
dtype: int32
cat_4.bin:
cardinality: 100004
dtype: int32
cat_5.bin:
cardinality: 100005
dtype: int32
cat_6.bin:
cardinality: 100006
dtype: int32
cat_7.bin:
cardinality: 100007
dtype: int32
cat_8.bin:
cardinality: 100008
dtype: int32
cat_9.bin:
cardinality: 10009
dtype: int16
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
|
TensorFlow/Classification/ConvNets/dataprep | dataprep | imagewoof_synsets | n02086240
n02087394
n02088364
n02089973
n02093754
n02096294
n02099601
n02105641
n02111889
n02115641
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/cc/kernels/cuda_kernels | cuda_kernels | dot_based_interact_fp16 | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include "dot_based_interact_shared_utils.cuh"
struct __align__(8) half4 {
half2 vals[2];
};
using namespace nvcuda;
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint M_BLOCKS,
uint K_BLOCKS,
uint SMEM_STRIDE,
uint SMEM_STRIDE_ACC,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2,
bool IS_ALIGNED>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelFP16(const __half *__restrict input,
__half *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding, //nearest greater multiple of 16
uint num_cols_after_padding, //nearest greater multiple of 16
uint smem_elems_per_warp, //size of shmem allocation for each warp
uint smem_rows_per_warp, //how many rows are multiplied in a single pass
uint output_size, //this is with padding
uint num_row_steps,
uint num_col_steps,
uint padding_size) {
  // Two approaches are used to pad the matrices out to dimensions that are multiples of 16:
  // 1) both dimensions are padded up to the next multiple of 16, and the result is used as-is;
  // 2) during the multiplication, the second tile is inset so that it overlaps the first one,
  //    and the duplicated part is skipped when unwinding the resulting matrix.
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
const half *sample_input = input + num_rows * num_cols * sample_id;
// Input -> shmem
  if (IS_ALIGNED) { //TODO use constexpr if when makefile is upgraded to C++17
for(uint idx = lane_id; idx < (num_cols >> 2); idx+=WARP_SIZE){
for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
((float2 *)(shmem + i * SMEM_STRIDE))[idx] = ((float2 *)sample_input)[idx];//todo will this not read OOB for last sample?
}
}
}
else {
for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
(shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
}
}
}
// pad out each row to have a multiple of 16 columns
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
// pad out with zero rows until we have a multiple of 16 rows
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
for (uint idx = lane_id; idx < (num_cols_after_padding >> 2); idx += WARP_SIZE) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[idx] = zeros;
}
}
__syncwarp();
// copy bottom mlp output into the output tensor
half *gmem_output = output + output_size * sample_id;
if (IS_ALIGNED) {
for(uint idx = lane_id; idx < (num_cols >> 2); idx+=WARP_SIZE){
((float2 *)gmem_output)[idx] = ((float2 *)shmem)[idx];
}
}
else {
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
gmem_output[idx] = shmem[idx];
}
}
// perform tiled multiplication using tensor cores
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]); //todo we can get a speedup by skipping one of the multiplications - they are redundant
}
}
}
// dump accumulators back into shared memory
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
// unwind accumulators into the output memory
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
for(uint idx = lane_id; idx < i; idx+=WARP_SIZE){
uint offset = (i * (i - 1)) >> 1; //sum of previously unwound rows
gmem_interact_output[offset + idx] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + idx]);
}
}
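  // Illustration of the unwinding above (not executed): row i contributes i lower-triangular
  // elements, and the rows before it contribute 0 + 1 + ... + (i - 1) = i * (i - 1) / 2
  // elements, so e.g. row i = 4 starts writing at offset 6 within the interaction output.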
// Add padding to the output vectors
if (lane_id < padding_size) {
gmem_output[output_size - lane_id - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2,
bool IS_ALIGNED>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractBwdKernelFP16(const __half *__restrict input,
const __half *__restrict upstream_grad,
                                       half *__restrict grad,
                                       half *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint num_row_steps,
uint num_col_steps,
uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const half *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
half *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
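  // Aligned path: float4 loads move 8 halves per iteration; the remainder is copied element-wise.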
if(IS_ALIGNED){
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += WARP_SIZE) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
}
uint offset = (interaction_ugrad_size >> 3) << 3;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
}
else {
#pragma unroll
for (uint idx = lane_id; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
}
__syncwarp();
// Form the 2D ugrad matrix.
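  // The interaction upstream gradient is a flattened strict lower triangle (row idx has idx
  // entries starting at idx*(idx-1)/2). Each value is scattered to both (idx, row) and (row, idx)
  // of a dense matrix in smem_temp; the diagonal and all padded rows/columns are zero-filled so a
  // plain tile GEMM can be used below.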
  for (uint idx = lane_id; idx < num_rows_after_padding; idx += WARP_SIZE) {
uint ugrad_flat_index = ((idx * (idx - 1)) >> 1);
uint ugrad_offset_1 = idx * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < idx && idx < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= idx && idx < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + idx] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + idx] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
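  // Rows and columns are zero-padded up to num_rows_after_padding / num_cols_after_padding so the
  // WMMA tiles are fully populated; the padding does not contribute to the GEMM result.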
if (IS_ALIGNED) {
    for (uint idx = lane_id; idx < (num_cols >> 2); idx += WARP_SIZE) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
const half *gmem_row_ptr = &gmem_input[row * num_cols];
((float2 *)smem_row_ptr)[idx] = ((float2 *)gmem_row_ptr)[idx];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
    for (uint idx = lane_id; idx < (num_cols_after_padding >> 2); idx += WARP_SIZE) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
((half4 *)smem_row_ptr)[idx] = zeros;
}
}
}
else {
    // Unaligned fallback: element-wise copies with zero padding
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
const half *gmem_row_ptr = &gmem_input[row * num_cols];
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
smem_row_ptr[idx] = gmem_row_ptr[idx];
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
smem_row_ptr[idx] = __float2half(0);
}
}
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
for (uint idx = lane_id; idx < num_cols_after_padding; idx += WARP_SIZE) {
smem_row_ptr[idx] = __float2half(0);
}
}
}
__syncwarp();
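  // grad = ugrad_2D * input, computed tile by tile with tensor cores. Because ugrad_2D stores each
  // upstream value in both mirrored positions (G + G^T with a zero diagonal), this single GEMM
  // yields the gradient of the interaction output with respect to the input.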
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP]
[ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
    // gmem_grad_col = col_step * TILE_DIM + lane_id; columns touched by lanes >= TILE_DIM are
    // rewritten with the correct values on the next col_step (or rejected by the bound check below)
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
// only copy over result if we are not in the padding
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
}
}
}
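  // The bottom-MLP output passes through to the interaction output unchanged in the forward pass,
  // so its gradient is simply the first num_cols entries of the upstream gradient.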
if (IS_ALIGNED) {
    for (uint idx = lane_id; idx < (num_cols >> 2); idx += WARP_SIZE) {
((float2 *)gmem_mlp_grad)[idx] = ((float2 *)gmem_ugrad)[idx];
}
}
else {
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
gmem_mlp_grad[idx] = gmem_ugrad[idx];
}
}
}