relative_path | section | filename | text
---|---|---|---|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | start_NVIDIA-T4 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2
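# Optional sanity check (editor's note; the CUDA image tag is illustrative):
# docker run --rm --gpus all nvidia/cuda:11.4.2-base-ubuntu20.04 nvidia-smi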
# Install packages
pip install -r triton/runner/requirements.txt
# Evaluate Runner
python3 -m "triton.runner.__main__" \
--config-path "triton/runner/config_NVIDIA-T4.yaml" \
--device 0 |
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/library | library | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List, Optional
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
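# Editor's note: infer_precision tallies node dtypes (integer "i" and bool "b"
# kinds are filtered out) and returns the most common remaining entry, i.e.
# the dominant floating-point precision of the graph.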
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
def _mark_batch_axis(shape, batch_axis: int):
shape = list(shape)
shape[batch_axis] = -1
return tuple(shape)
    # get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
if batch_size_dim is not None:
input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()}
output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()}
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
    dynamic_axes = {}
    for k, shape in all_shapes.items():
        for idx, s in enumerate(shape):
            if s == -1:
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)
    if batch_size_dim is not None:
        for k in all_shapes:
            dynamic_axes.setdefault(k, {})[batch_size_dim] = "batch_size_" + str(batch_size_dim)
return dynamic_axes
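# Hypothetical usage sketch (editor's addition; `model`, `example_inputs` and
# `dataloader` are assumed to exist): the returned mapping matches the
# `dynamic_axes` argument of torch.onnx.export, e.g.
# {"input__0": {0: "batch_size_0", 1: "input__0_1"}}.
#
#   import torch
#   dynamic_axes = get_dynamic_axes(dataloader, batch_size_dim=0)
#   torch.onnx.export(model, example_inputs, "model.onnx",
#                     dynamic_axes=dynamic_axes)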
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape))
max_shapes[k] = tuple(max(a, b) for a, b in zip(max_shapes[k], shape))
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
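# Editor's note: the triples above follow the min/opt/max convention used by
# e.g. TensorRT optimization profiles: `min` pins the batch dim to 1 and keeps
# the elementwise minimum of the remaining dims, while `opt`/`max` pin the
# batch dim to max_batch_size and keep the most frequent / elementwise-maximum
# remaining dims observed over the dataloader.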
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | pipeline_impl | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .pipeline import Pipeline
pipeline = Pipeline()
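# Editor's note: the stages below are registered in execution order: model
# export -> conversion -> Triton deployment -> input-data preparation ->
# offline and online performance tests. Each command block is a bash snippet
# run by the runner with environment variables such as EXPORT_FORMAT,
# SHARED_DIR and CHECKPOINT_DIR supplied by the runner configuration.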
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
\
--checkpoint ${CHECKPOINT_DIR}/ \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--dataset ${DATASETS_DIR}/${DATASET} \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.08 \
--max-workspace-size 10000000000 \
--atol target__0=100 \
--rtol target__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${CONFIG_FORMAT} \
--model-control-mode ${TRITON_LOAD_MODEL_METHOD} \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching dynamic \
--preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \
--max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \
--engine-count-per-device ${DEVICE}=${TRITON_GPU_ENGINE_COUNT}
""",
)
)
pipeline.triton_prepare_performance_profiling_data(
commands=(
r"""
mkdir -p ${SHARED_DIR}/input_data
""",
r"""
python triton/prepare_input_data.py \
--input-data-dir ${SHARED_DIR}/input_data/ \
--dataset ${DATASETS_DIR}/${DATASET} \
    --checkpoint ${CHECKPOINT_DIR}/
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--batching-mode static \
--evaluation-mode offline \
--measurement-request-count ${REQUEST_COUNT} \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data ${SHARED_DIR}/input_data/data.json \
--batch-sizes ${BATCH_SIZE} \
--number-of-triton-instances ${TRITON_INSTANCES} \
--number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
--batching-mode dynamic \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) |
TensorFlow/Translation/GNMT/scripts/docker | docker | interactive | #!/bin/bash
nvidia-docker run -it --rm --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -v "$PWD":/workspace/gnmt gnmt_tf bash
|
PyTorch/Classification/GPUNet/triton | triton | run_inference_on_fw | #!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model on framework runtime, you can use `run_inference_on_fw.py` script.
It infers data obtained from pointed data loader locally and saves received data into dump files.
Those files are stored in directory pointed by `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
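# Editor's note: the dict returned above maps section names ("outputs", "ids"
# and, optionally, "inputs"/"labels") to dicts of named arrays; main() below
# passes one such dict per batch to JsonDumpWriter.write.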
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
    if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info("Inference finished")
if __name__ == "__main__":
main()
|
TensorFlow2/Segmentation/UNet_Medical/examples | examples | unet_TRAIN_BENCHMARK | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net training in FP32 for benchmarking. Usage:
# bash unet_TRAIN_BENCHMARK.sh <number of gpus> <path to dataset> <path to results directory> <batch size>
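# Example (editor's note; paths are illustrative):
# bash examples/unet_TRAIN_BENCHMARK.sh 8 /data/unet_medical /results 8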
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --batch_size $4 --exec_mode train --augment --benchmark --warmup_steps 200 --max_steps 1000 --xla |
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssdlite_mobilenet_v2_coco | # SSDLite with Mobilenet v2 configuration for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
ssd {
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.2
max_scale: 0.95
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
}
}
image_resizer {
fixed_shape_resizer {
height: 300
width: 300
}
}
box_predictor {
convolutional_box_predictor {
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.8
kernel_size: 3
use_depthwise: true
box_code_size: 4
apply_sigmoid_to_scores: false
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
train: true,
scale: true,
center: true,
decay: 0.9997,
epsilon: 0.001,
}
}
}
}
feature_extractor {
type: 'ssd_mobilenet_v2'
min_depth: 16
depth_multiplier: 1.0
use_depthwise: true
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
train: true,
scale: true,
center: true,
decay: 0.9997,
epsilon: 0.001,
}
}
}
loss {
classification_loss {
weighted_sigmoid {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
hard_example_miner {
num_hard_examples: 3000
iou_threshold: 0.99
loss_type: CLASSIFICATION
max_negatives_per_positive: 3
min_negatives_per_image: 3
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
batch_size: 24
optimizer {
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
}
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
fine_tune_checkpoint_type: "detection"
# Note: The below line limits the training process to 200K steps, which we
# empirically found to be sufficient to train on the COCO dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
} |
PyTorch/SpeechRecognition/wav2vec2/utils | utils | preprocessing_utils | #!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
    speed = list(speed or [])  # copy to avoid mutating the caller's list
    speed.append(1)
    speed = list(set(speed))  # make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s == 1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed,
overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess, input_dir=input_dir,
dest_dir=dest_dir, target_sr=target_sr,
speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
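# Hypothetical usage sketch (editor's addition): each `dataset` entry is a dict
# with 'input_relpath', 'input_fname' and 'transcript' keys, e.g.:
#
#   dataset = [{'input_relpath': 'spk1/chp1', 'input_fname': 'utt1.flac',
#               'transcript': 'Hello world'}]
#   out = parallel_preprocess(dataset, input_dir='/data/in',
#                             dest_dir='/data/out', target_sr=16000,
#                             speed=[0.9, 1.1], overwrite=False, parallel=8)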
|
TensorFlow2/Detection/Efficientdet/model | model | fpn_configs | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BiFPN/QuFPN and other FPN configs.
BiFPN is presented in the EfficientDet paper.
QuFPN is proposed in https://github.com/google/automl/pull/580
"""
import itertools
from utils import hparams_config
def bifpn_config(min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
def qufpn_config(min_level, max_level, weight_method=None):
"""A dynamic quad fpn config that can adapt to different min/max levels."""
# It extends the idea of BiFPN, and has four paths:
# (up_down -> bottom_up) + (bottom_up -> up_down).
# See test for an example for level 2 and 7.
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
p.quad_method = 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
level_first_id = lambda level: node_ids[level][0]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path 1.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(min_level + 1, max_level):
# bottom-up path 2.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
i = max_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(min_level + 1, max_level + 1, 1):
# bottom-up path 3.
p.nodes.append({
'feat_level': i,
        'inputs_offsets': [
            level_first_id(i),
            level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)
        ],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(max_level - 1, min_level, -1):
# top-down path 4.
    p.nodes.append({
        'feat_level': i,
        'inputs_offsets': [node_ids[i][0], node_ids[i][-1],
                           level_last_id(i + 1)],
        'weight_method': p.weight_method
    })
node_ids[i].append(next(id_cnt))
i = min_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(max_level, min_level - 1, -1):
# quad-add path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
'weight_method': p.quad_method
})
node_ids[i].append(next(id_cnt))
return p
def get_fpn_config(fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = 'bifpn'
name_to_config = {
'bifpn': bifpn_config(min_level, max_level, weight_method),
'qufpn': qufpn_config(min_level, max_level, weight_method),
# legacy only: to be deprecated.
'bifpn_dyn': bifpn_config(min_level, max_level, weight_method),
}
return name_to_config[fpn_name]
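# Hypothetical usage sketch (editor's addition): build the standard BiFPN
# topology for levels P3-P7, matching the diagram in bifpn_config's comments.
#
#   cfg = get_fpn_config('bifpn', min_level=3, max_level=7,
#                        weight_method='fastattn')
#   print(cfg.nodes[0])  # {'feat_level': 6, 'inputs_offsets': [3, 4]}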
|
PyTorch/SpeechSynthesis/FastPitch/hifigan | hifigan | data_function | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# mel_spectrogram, MelDataset
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
from numpy import random
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from common.audio_processing import dynamic_range_compression
from common.utils import load_filepaths_and_text, load_wav
MAX_WAV_VALUE = 32768.0
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size,
fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
fmax_key = f'{fmax}_{y.device}'
if fmax_key not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[fmax_key] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
pad = int((n_fft-hop_size)/2)
y = F.pad(y.unsqueeze(1), (pad, pad), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
window=hann_window[str(y.device)], center=center,
pad_mode='reflect', normalized=False, onesided=True,
return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
    spec = torch.matmul(mel_basis[fmax_key], spec)
spec = dynamic_range_compression(spec) # spectral normalize
return spec
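# Editor's note: `hann_window` is a per-device cache and `mel_basis` a
# per-(fmax, device) cache, so the Hann window and mel filterbank are built
# once per configuration rather than on every call.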
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True,
device=None, fmax_loss=None, fine_tuning=False,
base_mels_path=None, repeat=1, deterministic=False,
max_wav_value=MAX_WAV_VALUE):
self.audio_files = training_files
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.max_wav_value = max_wav_value
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
self.repeat = repeat
self.deterministic = deterministic
self.rng = random.default_rng()
def __getitem__(self, index):
if index >= len(self):
raise IndexError('Dataset index out of range')
rng = random.default_rng(index) if self.deterministic else self.rng
index = index % len(self.audio_files) # collapse **after** setting seed
filename = self.audio_files[index]
audio, sampling_rate = load_wav(filename)
audio = audio / self.max_wav_value
if not self.fine_tuning:
audio = normalize(audio) * 0.95
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = rng.integers(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path,
os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel).float()
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = rng.integers(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
a = mel_start * self.hop_size
b = (mel_start + frames_per_seg) * self.hop_size
audio = audio[:, a:b]
else:
mel = F.pad(mel, (0, frames_per_seg - mel.size(2)))
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files) * self.repeat
def get_data_loader(args, distributed_run, train=True, batch_size=None,
val_kwargs=None):
filelists = args.training_files if train else args.validation_files
files = load_filepaths_and_text(args.dataset_path, filelists)
files = list(zip(*files))[0]
dataset_kw = {
'segment_size': args.segment_size,
'n_fft': args.filter_length,
'num_mels': args.num_mels,
'hop_size': args.hop_length,
'win_size': args.win_length,
'sampling_rate': args.sampling_rate,
'fmin': args.mel_fmin,
'fmax': args.mel_fmax,
'fmax_loss': args.mel_fmax_loss,
'max_wav_value': args.max_wav_value,
'fine_tuning': args.fine_tuning,
'base_mels_path': args.input_mels_dir,
'deterministic': not train
}
if train:
dataset = MelDataset(files, **dataset_kw)
sampler = DistributedSampler(dataset) if distributed_run else None
else:
dataset_kw.update(val_kwargs or {})
dataset = MelDataset(files, **dataset_kw)
sampler = (DistributedSampler(dataset, shuffle=False)
if distributed_run else None)
loader = DataLoader(dataset,
                        # NOTE: on DGX-1 and DGX A100, num_workers=1 is optimal for validation
num_workers=args.num_workers if train else 1,
shuffle=(train and not distributed_run),
sampler=sampler,
batch_size=batch_size or args.batch_size,
pin_memory=True,
persistent_workers=True,
drop_last=train)
return loader
|
TensorFlow/Translation/GNMT | GNMT | nmt | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
import subprocess
import numpy as np
import time
import tensorflow as tf
import dllogger
import estimator
from utils import evaluation_utils
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
from variable_mgr import constants
utils.check_tensorflow_version()
FLAGS = None
# LINT.IfChange
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
      Only used in standard attention_architecture. Whether to use attention
      as the cell output at each time step.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=200,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="luong234", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--max_train_epochs", type=int, default=6, help="Max number of epochs.")
parser.add_argument(
"--target_bleu", type=float, default=None, help="Target bleu.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="data/wmt16_de_en",
help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--translate_file",
type=str,
help="File to translate, works only with translate mode")
parser.add_argument(
"--output_dir", type=str, default="results",
help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
                      Whether to check that the special sos, eos, unk tokens
                      exist in the vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=50,
help="Max length of src sequences during training (including EOS).")
parser.add_argument(
"--tgt_max_len",
type=int,
default=50,
help="Max length of tgt sequences during training (including BOS).")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference (including EOS).")
parser.add_argument("--tgt_max_len_infer", type=int, default=80,
help="""\
      Max length of tgt sequences during inference (including BOS). Also used
      to restrict the maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=0.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Total batch size.")
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets (only for training).")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
                      Whether to split each word or bpe unit into characters,
                      and then generate the word-level representation from the
                      character representation.
""")
# Misc
parser.add_argument(
"--save_checkpoints_steps", type=int, default=2000,
help="save_checkpoints_steps")
parser.add_argument(
"--log_step_count_steps", type=int, default=10,
help=("The frequency, in number of global steps, that the global step "
"and the loss will be logged during training"))
parser.add_argument(
"--num_gpus", type=int, default=1, help="Number of gpus in each worker.")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=1,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default=None,
help="Checkpoint file to load a model for inference. (defaults to newest checkpoint)")
parser.add_argument(
"--infer_batch_size",
type=int,
default=128,
help="Batch size for inference mode.")
parser.add_argument("--detokenizer_file", type=str,
default=None,
help=("""Detokenizer script file. Default: DATA_DIR/mosesdecoder/scripts/tokenizer/detokenizer.perl"""))
parser.add_argument("--tokenizer_file", type=str,
default=None,
help=("""Tokenizer script file. Default: DATA_DIR/mosesdecoder/scripts/tokenizer/tokenizer.perl"""))
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
beam width when using beam search decoder. If 0, use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
# Job info
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--amp", action='store_true',
help="use amp for training and inference")
parser.add_argument("--use_fastmath", type="bool", default=False,
help="use_fastmath for training and inference")
parser.add_argument("--use_fp16", type="bool", default=False,
help="use_fp16 for training and inference")
parser.add_argument(
"--fp16_loss_scale",
type=float,
default=128,
help="If fp16 is enabled, the loss is multiplied by this amount "
"right before gradients are computed, then each gradient "
"is divided by this amount. Mathematically, this has no "
"effect, but it helps avoid fp16 underflow. Set to 1 to "
"effectively disable.")
parser.add_argument(
"--enable_auto_loss_scale",
type="bool",
default=True,
help="If True and use_fp16 is True, automatically adjust the "
"loss scale during training.")
parser.add_argument(
"--fp16_inc_loss_scale_every_n",
type=int,
default=128,
help="If fp16 is enabled and enable_auto_loss_scale is "
"True, increase the loss scale every n steps.")
parser.add_argument(
"--check_tower_loss_numerics",
type="bool",
default=False, # Set to false for xla.compile()
help="whether to check tower loss numerics")
parser.add_argument(
"--use_fp32_batch_matmul",
type="bool",
default=False,
help="Whether to use fp32 batch matmul")
# Performance
# XLA
parser.add_argument(
"--force_inputs_padding",
type="bool",
default=False,
help="Force padding input batch to src_max_len and tgt_max_len")
parser.add_argument(
"--use_xla",
type="bool",
default=False,
help="Use xla to compile a few selected locations, mostly Defuns.")
parser.add_argument(
"--xla_compile",
type="bool",
default=False,
help="Use xla.compile() for each tower's fwd and bak pass.")
parser.add_argument(
"--use_autojit_xla",
type="bool",
default=False,
help="Use auto jit xla.")
# GPU knobs
parser.add_argument(
"--use_pintohost_optimizer",
type="bool",
default=False,
help="whether to use PinToHost optimizer")
parser.add_argument(
"--use_cudnn_lstm",
type="bool",
default=False,
help="whether to use cudnn_lstm for encoder, non residual layers")
parser.add_argument(
"--use_loose_bidi_cudnn_lstm",
type="bool",
default=False,
help="whether to use loose bidi cudnn_lstm")
parser.add_argument(
"--use_fused_lstm",
type="bool",
default=True,
help="whether to use fused lstm and variant. If enabled, training will "
"use LSTMBlockFusedCell, infer will use LSTMBlockCell when appropriate.")
parser.add_argument(
"--use_fused_lstm_dec",
type="bool",
default=False,
help="whether to use fused lstm for decoder (training only).")
parser.add_argument(
"--gpu_indices",
type=str,
default="",
help="Indices of worker GPUs in ring order")
# Graph knobs
parser.add_argument("--parallel_iterations", type=int, default=10,
help="number of parallel iterations in dynamic_rnn")
parser.add_argument("--use_dist_strategy", type="bool", default=False,
help="whether to use distribution strategy")
parser.add_argument(
"--hierarchical_copy",
type="bool",
default=False,
help="Use hierarchical copies. Currently only optimized for "
"use on a DGX-1 with 8 GPUs and may perform poorly on "
"other hardware. Requires --num_gpus > 1, and only "
"recommended when --num_gpus=8")
parser.add_argument(
"--network_topology",
type=constants.NetworkTopology,
default=constants.NetworkTopology.DGX1,
choices=list(constants.NetworkTopology))
parser.add_argument(
"--use_block_lstm",
type="bool",
default=False,
help="whether to use block lstm")
parser.add_argument(
"--use_defun",
type="bool",
default=False,
help="whether to use Defun")
# Gradient tricks
parser.add_argument(
"--gradient_repacking",
type=int,
default=0,
help="Use gradient repacking. It"
"currently only works with replicated mode. At the end of"
"of each step, it repacks the gradients for more efficient"
"cross-device transportation. A non-zero value specifies"
"the number of split packs that will be formed.")
parser.add_argument(
"--compact_gradient_transfer",
type="bool",
default=True,
help="Compact gradient as much as possible for cross-device transfer and "
"aggregation.")
parser.add_argument(
"--all_reduce_spec",
type=str,
default="nccl",
help="A specification of the all_reduce algorithm to be used "
"for reducing gradients. For more details, see "
"parse_all_reduce_spec in variable_mgr.py. An "
"all_reduce_spec has BNF form:\n"
"int ::= positive whole number\n"
"g_int ::= int[KkMGT]?\n"
"alg_spec ::= alg | alg#int\n"
"range_spec ::= alg_spec | alg_spec/alg_spec\n"
"spec ::= range_spec | range_spec:g_int:range_spec\n"
"NOTE: not all syntactically correct constructs are "
"supported.\n\n"
"Examples:\n "
"\"xring\" == use one global ring reduction for all "
"tensors\n"
"\"pscpu\" == use CPU at worker 0 to reduce all tensors\n"
"\"nccl\" == use NCCL to locally reduce all tensors. "
"Limited to 1 worker.\n"
"\"nccl/xring\" == locally (to one worker) reduce values "
"using NCCL then ring reduce across workers.\n"
"\"pscpu:32k:xring\" == use pscpu algorithm for tensors of "
"size up to 32kB, then xring for larger tensors.")
parser.add_argument(
"--agg_small_grads_max_bytes",
type=int,
default=0,
help="If > 0, try to aggregate tensors of less than this "
"number of bytes prior to all-reduce.")
parser.add_argument(
"--agg_small_grads_max_group",
type=int,
default=10,
help="When aggregating small tensors for all-reduce do not "
"aggregate more than this many into one new tensor.")
parser.add_argument(
"--allreduce_merge_scope",
type=int,
default=1,
help="Establish a name scope around this many "
"gradients prior to creating the all-reduce operations. "
"It may affect the ability of the backend to merge "
"parallel ops.")
# Other knobs
parser.add_argument(
"--local_parameter_device",
type=str,
default="gpu",
help="Device to use as parameter server: cpu or gpu. For "
"distributed training, it can affect where caching of "
"variables happens.")
parser.add_argument(
"--use_resource_vars",
type="bool",
default=False,
help="Use resource variables instead of normal variables. "
"Resource variables are slower, but this option is useful "
"for debugging their performance.")
parser.add_argument("--debug", type="bool", default=False,
help="Debug train and eval")
parser.add_argument(
"--debug_num_train_steps", type=int, default=None, help="Num steps to train.")
parser.add_argument("--show_metrics", type="bool", default=True,
help="whether to show detailed metrics")
parser.add_argument("--clip_grads", type="bool", default=True,
help="whether to clip gradients")
parser.add_argument("--profile", type="bool", default=False,
help="If generate profile")
parser.add_argument("--profile_save_steps", type=int, default=10,
help="Save timeline every N steps.")
parser.add_argument("--use_dynamic_rnn", type="bool", default=True)
parser.add_argument("--use_synthetic_data", type="bool", default=False)
parser.add_argument(
"--mode", type=str, default="train_and_eval",
choices=("train_and_eval", "infer", "translate"))
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=os.path.join(flags.data_dir, flags.train_prefix),
test_prefix=os.path.join(flags.data_dir, flags.test_prefix),
translate_file=flags.translate_file,
vocab_prefix=os.path.join(flags.data_dir, flags.vocab_prefix),
embed_prefix=flags.embed_prefix,
output_dir=flags.output_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
max_train_epochs=flags.max_train_epochs,
target_bleu=flags.target_bleu,
label_smoothing=flags.label_smoothing,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
# Data constraints
num_buckets=flags.num_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
ckpt=flags.ckpt,
infer_batch_size=flags.infer_batch_size,
detokenizer_file=flags.detokenizer_file if flags.detokenizer_file is not None \
else os.path.join(flags.data_dir, 'mosesdecoder/scripts/tokenizer/detokenizer.perl'),
tokenizer_file=flags.tokenizer_file if flags.tokenizer_file is not None \
else os.path.join(flags.data_dir, 'mosesdecoder/scripts/tokenizer/tokenizer.perl'),
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
save_checkpoints_steps=flags.save_checkpoints_steps,
log_step_count_steps=flags.log_step_count_steps,
epoch_step=0, # record where we were within an epoch.
share_vocab=flags.share_vocab,
random_seed=flags.random_seed,
language_model=flags.language_model,
amp=flags.amp,
use_fastmath=flags.use_fastmath,
use_fp16=flags.use_fp16,
fp16_loss_scale=flags.fp16_loss_scale,
enable_auto_loss_scale=flags.enable_auto_loss_scale,
fp16_inc_loss_scale_every_n=flags.fp16_inc_loss_scale_every_n,
check_tower_loss_numerics=flags.check_tower_loss_numerics,
use_fp32_batch_matmul=flags.use_fp32_batch_matmul,
# Performance
# GPU knbs
force_inputs_padding=flags.force_inputs_padding,
use_xla=flags.use_xla,
xla_compile=flags.xla_compile,
use_autojit_xla=flags.use_autojit_xla,
use_pintohost_optimizer=flags.use_pintohost_optimizer,
use_cudnn_lstm=flags.use_cudnn_lstm,
use_loose_bidi_cudnn_lstm=flags.use_loose_bidi_cudnn_lstm,
use_fused_lstm=flags.use_fused_lstm,
use_fused_lstm_dec=flags.use_fused_lstm_dec,
gpu_indices=flags.gpu_indices,
# Graph knobs
parallel_iterations=flags.parallel_iterations,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_dist_strategy=flags.use_dist_strategy,
hierarchical_copy=flags.hierarchical_copy,
network_topology=flags.network_topology,
use_block_lstm=flags.use_block_lstm,
# Grad tricks
gradient_repacking=flags.gradient_repacking,
compact_gradient_transfer=flags.compact_gradient_transfer,
all_reduce_spec=flags.all_reduce_spec,
agg_small_grads_max_bytes=flags.agg_small_grads_max_bytes,
agg_small_grads_max_group=flags.agg_small_grads_max_group,
allreduce_merge_scope=flags.allreduce_merge_scope,
# Other knobs
local_parameter_device=("cpu" if flags.num_gpus == 0
else flags.local_parameter_device),
use_resource_vars=flags.use_resource_vars,
debug=flags.debug,
debug_num_train_steps=flags.debug_num_train_steps,
clip_grads=flags.clip_grads,
profile=flags.profile,
profile_save_steps=flags.profile_save_steps,
show_metrics=flags.show_metrics,
use_synthetic_data=flags.use_synthetic_data,
mode=flags.mode,
)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
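# Illustrative usage (values are hypothetical):
#   _add_argument(hparams, "src_vocab_size", 32000)
# This adds the hparam when it is missing; an existing hparam is only
# overwritten when update=True (the default).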
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.mode == "translate" and not hparams.translate_file:
raise ValueError("--translate_file flag must be specified in translate mode")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have a residual connection because its input
# is the concatenation of the fw_cell's and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# For compatibility with GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if hparams.language_model:
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK,
pad_vocab=True)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if hparams.embed_prefix:
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
metric = "bleu"
best_metric_dir = os.path.join(hparams.output_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from output_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = flags.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
estimator_fn(hparams)
return hparams
def tokenize(hparams, file, tokenized_file):
utils.print_out("tokenizing {} -> {}".format(file, tokenized_file))
with open(file, 'rb') as input_file:
with open(tokenized_file, 'wb') as output_file:
subprocess.run([hparams.tokenizer_file, '-l', hparams.src], stdin=input_file, stdout=output_file)
def detokenize(hparams, file, detokenized_file):
utils.print_out("detokenizing {} -> {}".format(file, detokenized_file))
with open(file, 'rb') as input_file:
with open(detokenized_file, 'wb') as output_file:
subprocess.run([hparams.detokenizer_file, '-l', hparams.tgt], stdin=input_file, stdout=output_file)
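# Illustrative: with the default Moses scripts, the two helpers above are
# roughly equivalent to the shell pipelines (file names hypothetical):
#   tokenizer.perl -l <src> < input.txt > input.txt.tok
#   detokenizer.perl -l <tgt> < output.trans.tok > output.trans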
def main(unused_argv):
experiment_start = time.time()
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.use_fp16 and FLAGS.use_dist_strategy:
raise ValueError("use_fp16 and use_dist_strategy aren't compatible")
if FLAGS.use_fp16 + FLAGS.amp + FLAGS.use_fastmath > 1:
raise ValueError("Only one of use_fp16, amp, use_fastmath can be set")
if FLAGS.amp:
utils.print_out('Enabling TF-AMP')
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
if FLAGS.use_fastmath:
utils.print_out('Enabling FastMath')
os.environ["TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32"] = '1'
os.environ["TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32"] = '1'
os.environ["TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32"] = '1'
# Set up hacky envvars.
# Hack that affects Defun in attention_wrapper.py
active_xla_option_nums = np.sum([FLAGS.use_xla, FLAGS.use_autojit_xla,
FLAGS.xla_compile])
if active_xla_option_nums > 1:
raise ValueError(
"Only one of use_xla, xla_compile, use_autojit_xla can be set")
os.environ["use_xla"] = str(FLAGS.use_xla).lower()
if FLAGS.use_xla:
os.environ["use_defun"] = str(True).lower()
else:
os.environ["use_defun"] = str(FLAGS.use_defun).lower()
utils.print_out("use_defun is %s for attention" % os.environ["use_defun"])
# TODO(jamesqin): retire this config after Cuda9.1
os.environ["use_fp32_batch_matmul"] = ("true" if FLAGS.use_fp32_batch_matmul
else "false")
os.environ["xla_compile"] = "true" if FLAGS.xla_compile else "false"
os.environ["force_inputs_padding"] = (
"true" if FLAGS.force_inputs_padding else "false")
if FLAGS.mode == "train":
utils.print_out("Running training mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "infer" or FLAGS.mode == "translate":
if FLAGS.mode == "infer":
utils.print_out("Running inference mode.")
translate_mode = False
else:
utils.print_out("Running translate mode on file {}.".format(FLAGS.translate_file))
translate_mode = True
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, os.path.join(FLAGS.output_dir, FLAGS.mode + '-report.json')),
])
dllogger.log('PARAMETER', vars(FLAGS))
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 1
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("infer_hparams:")
utils.print_hparams(hparams)
if translate_mode:
tokenize(hparams, hparams.translate_file, hparams.translate_file + ".tok")
eval_sentences, eval_src_tokens, _ = iterator_utils.get_effective_epoch_size(hparams, train=False)
# Run evaluation when there's a new checkpoint
tf.logging.info("Starting to evaluate...")
eval_start = time.time()
_, (eval_speed, eval_latencies), eval_output_tokens = estimator.eval_fn(hparams, hparams.ckpt, only_translate=translate_mode)
eval_end = time.time()
eval_delta = eval_end - eval_start
utils.print_out("eval time for ckpt: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(eval_delta / 60., eval_speed, eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences), f=sys.stderr)
logging_data = {
'infer_speed_sent': eval_speed,
'infer_speed_toks': eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences,
}
for lat in sorted(eval_latencies):
utils.print_out("eval latency_%s for ckpt: %.2f ms" % (lat, eval_latencies[lat] * 1000))
logging_data['infer_latency_{}'.format(lat)] = eval_latencies[lat] * 1000
dllogger.log((), logging_data)
dllogger.flush()
if translate_mode:
detokenize(hparams, hparams.translate_file + ".trans.tok", hparams.translate_file + ".trans")
else:
assert FLAGS.mode == "train_and_eval"
utils.print_out("Running train and eval mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
dllogger.init(backends=[
dllogger.StdOutBackend(dllogger.Verbosity.DEFAULT),
dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, os.path.join(FLAGS.output_dir, FLAGS.mode + '-report.json')),
])
dllogger.log('PARAMETER', vars(FLAGS))
dllogger.metadata("bleu", {"unit": None})
dllogger.metadata("train_speed_sent", {"unit": "sequences/s"})
dllogger.metadata("train_speed_toks", {"unit": "tokens/s"})
# Load hparams.
default_hparams = create_hparams(FLAGS)
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("training hparams:")
utils.print_hparams(hparams)
with tf.gfile.GFile(os.path.join(output_dir, "train_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(hparams) + "\n")
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
infer_hparams = tf.contrib.training.HParams(**hparams.values())
infer_hparams.num_buckets = 1
utils.print_out("infer_hparams:")
utils.print_hparams(infer_hparams)
with tf.gfile.GFile(os.path.join(output_dir, "infer_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(infer_hparams) + "\n")
epochs = 0
should_stop = epochs >= FLAGS.max_train_epochs
train_sentences, train_src_tokens, train_tgt_tokens = iterator_utils.get_effective_epoch_size(hparams)
eval_sentences, eval_src_tokens, _ = iterator_utils.get_effective_epoch_size(hparams, train=False)
while not should_stop:
utils.print_out("Starting epoch %d" % epochs)
train_speed = 0.0  # fallback in case train_fn raises before returning
try:
train_start = time.time()
train_speed, _ = estimator.train_fn(hparams)
except tf.errors.OutOfRangeError:
utils.print_out("training hits OutOfRangeError", f=sys.stderr)
train_end = time.time()
train_delta = train_end - train_start
utils.print_out("training time for epoch %d: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(epochs + 1, train_delta / 60., train_speed, train_speed * (train_src_tokens + train_tgt_tokens) / train_sentences), f=sys.stderr)
logging_data = {
'train_speed_sent': train_speed,
'train_speed_toks': train_speed * (train_src_tokens + train_tgt_tokens) / train_sentences,
}
# This is probably sub-optimal, doing eval per-epoch
eval_start = time.time()
bleu_score, (eval_speed, eval_latencies), eval_output_tokens = estimator.eval_fn(infer_hparams)
eval_end = time.time()
eval_delta = eval_end - eval_start
utils.print_out("eval time for epoch %d: %.2f mins (%.2f sent/sec, %.2f tokens/sec)" %
(epochs + 1, eval_delta / 60., eval_speed, eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences), f=sys.stderr)
logging_data.update({
'bleu': bleu_score,
'infer_speed_sent': eval_speed,
'infer_speed_toks': eval_speed * (eval_src_tokens + eval_output_tokens) / eval_sentences,
})
for lat in sorted(eval_latencies):
utils.print_out("eval latency_%s for epoch %d: %.2f ms" % (lat, epochs + 1, eval_latencies[lat] * 1000))
logging_data['eval_latency_{}'.format(lat)] = eval_latencies[lat] * 1000
dllogger.log((epochs,), logging_data)
dllogger.flush()
if FLAGS.debug or (FLAGS.target_bleu is not None and bleu_score > FLAGS.target_bleu):
should_stop = True
utils.print_out(
"Stop job since target bleu is reached at epoch %d ." % epochs,
f=sys.stderr)
epochs += 1
if epochs >= FLAGS.max_train_epochs:
should_stop = True
utils.print_out("Stop job since max_train_epochs is reached.",
f=sys.stderr)
dllogger.log((), logging_data)
dllogger.flush()
experiment_end = time.time()
utils.print_out('Experiment took {} min'.format((experiment_end - experiment_start) / 60))
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | encoderInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_ENCODERINSTANCE_H
#define TT2I_ENCODERINSTANCE_H
#include "binding.h"
#include "engineDriver.h"
#include "timedObject.h"
#include "trtPtr.h"
#include "NvInfer.h"
#include "cuda_runtime.h"
#include <string>
namespace tts
{
class EncoderInstance : public TimedObject, public EngineDriver
{
public:
/**
* @brief Tensor of shape {1 x INPUT_LENGTH}
*/
static constexpr const char* const INPUT_NAME = "input_encoder";
/**
* @brief Tensor of shape {1 x INPUT_LENGTH x 1}
*/
static constexpr const char* const INPUT_MASK_NAME = "input_encoder_mask";
/**
* @brief Tensor of shape {INPUT_LENGTH}
*/
static constexpr const char* const INPUT_LENGTH_NAME = "input_encoder_length";
/**
* @brief Tensor of shape {1 x INPUT_LENGTH x NUM_DIMENSIONS}
*/
static constexpr const char* const OUTPUT_NAME = "output_encoder";
/**
* @brief Tensor of shape {INPUT_LENGTH x NUM_PROCESSED_DIMENSIONS x 1 x 1}
*/
static constexpr const char* const OUTPUT_PROCESSED_NAME = "output_processed_encoder";
static constexpr const char* const ENGINE_NAME = "tacotron2_encoder";
/**
* @brief Create a new encoder instance.
*
* @param engine The TRT Engine implementing Tacotron2's encoder.
*/
EncoderInstance(TRTPtr<nvinfer1::ICudaEngine> engine);
// disable copying
EncoderInstance(const EncoderInstance& other) = delete;
EncoderInstance& operator=(const EncoderInstance& other) = delete;
/**
* @brief Perform inference.
*
* @param stream The CUDA stream.
* @param batchSize The size of the batch.
* @param inputDevice The input on the GPU.
* @param inputMaskDevice The input mask on the GPU (all 1's for the length of
* the actual input and all 0's for the length of the padding).
* @param inputLengthDevice The length of the input sequences on the GPU.
* @param outputDevice The output on the GPU (must be of input length x number
* of encoding dimensions).
* @param outputProcessedDevice The output on the GPU processed through the
* memory layer (must be of input length x number of processed dimensions).
*/
void infer(cudaStream_t stream, int batchSize, const int32_t* inputDevice, const float* inputMaskDevice,
const int32_t* inputLengthDevice, float* outputDevice, float* outputProcessedDevice);
/**
* @brief Get the length of input (padded size).
*
* @return The input length.
*/
int getInputLength() const;
/**
* @brief Get the number of encoding dimensions.
*
* @return The number of encoding dimensions.
*/
int getNumDimensions() const;
/**
* @brief Get the number of processed dimensions (attention).
*
* @return The number of processed dimensions.
*/
int getNumProcessedDimensions() const;
private:
Binding mBinding;
TRTPtr<nvinfer1::IExecutionContext> mContext;
int mInputLength;
};
} // namespace tts
#endif
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit/model_analyzer | model_analyzer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
PyTorch/Classification/ConvNets/scripts | scripts | sernxt_partial | FLAGS=$1
STAGE_ID=$2
STAGE_LEN=$3
python ./multiproc.py \
--nproc_per_node 8 \
./main.py /imagenet \
-j5 -p 100 \
--data-backend pytorch \
--raport-file report_$STAGE_ID.json \
--lr 1.024 \
--batch-size 128 \
--optimizer-batch-size 1024 \
--static-loss-scale 128 \
--warmup 8 \
--arch se-resnext101-32x4d -c fanin \
--label-smoothing 0.1 \
--lr-schedule cosine \
--mom 0.875 \
--wd 6.103515625e-05 \
--workspace /results \
--epochs 90 \
--run-epochs $STAGE_LEN \
$FLAGS \
--resume /results/checkpoint_$( expr $STAGE_ID - 1).pth.tar \
--checkpoint checkpoint_$STAGE_ID.pth.tar
|
PyTorch/SpeechRecognition/Jasper/triton/scripts | scripts | run_perf_client |
#!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
trap "exit" INT
SCRIPT_DIR=$(cd $(dirname $0); pwd)
PROJECT_DIR=${SCRIPT_DIR}/../..
TRITON_CLIENT_CONTAINER_TAG=${TRITON_CLIENT_CONTAINER_TAG:-jasper:triton}
SERVER_HOSTNAME=${SERVER_HOSTNAME:-localhost}
MODEL_NAME=${MODEL_NAME:-jasper-tensorrt-ensemble}
MODEL_VERSION=${MODEL_VERSION:-1}
BATCH_SIZE=${BATCH_SIZE:-1}
AUDIO_LENGTH=${AUDIO_LENGTH:-32000}
RESULT_DIR=${RESULT_DIR:-${PROJECT_DIR}/results}
MAX_LATENCY=${MAX_LATENCY:-500}
MAX_CONCURRENCY=${MAX_CONCURRENCY:-64}
MEASUREMENT_WINDOW=${MEASUREMENT_WINDOW:-3000}
TIMESTAMP=$(date "+%y%m%d_%H%M")
# RESULT_DIR_H is the path on the host, outside the container; inside the container it is always mounted at /results
RESULT_DIR_H="${RESULT_DIR}/perf_client/${MODEL_NAME}"
# Set the output folder using the first argument or pick a default
if [ -z ${1+x} ]; then
RESULT_DIR_H=${RESULT_DIR_H}/batch_${BATCH_SIZE}_len_${AUDIO_LENGTH}
else
RESULT_DIR_H=${RESULT_DIR_H}/"$1"
shift
fi
# Make the directory if it doesn't exist
if [ ! -d "${RESULT_DIR_H}" ]; then
mkdir -p "${RESULT_DIR_H}"
fi
echo "Saving output to ${RESULT_DIR_H}"
LOGNAME="${RESULT_DIR_H}/log_${TIMESTAMP}.log"
OUTPUT_FILE_CSV="results_${TIMESTAMP}.csv"
ARGS="\
-m ${MODEL_NAME} \
-x ${MODEL_VERSION} \
-p ${MEASUREMENT_WINDOW} \
-v \
-i gRPC \
-u ${SERVER_HOSTNAME}:8001 \
-b ${BATCH_SIZE} \
-l ${MAX_LATENCY} \
--max-threads ${MAX_CONCURRENCY} "
curl -s "http://${SERVER_HOSTNAME}:8000/api/status/${MODEL_NAME}" | grep ready_state | grep SERVER_READY || (echo "Model ${MODEL_NAME} is not ready, perf_client skipped..." && exit 1)
echo "=== STARTING: perf client ${ARGS} --concurrency-range 1:4:1 ==="
set -x
docker run -e DISPLAY=${DISPLAY} --runtime nvidia --rm \
--privileged --net=host \
-v ${RESULT_DIR_H}:/results --name jasper-perf-client \
${TRITON_CLIENT_CONTAINER_TAG} perf_client $ARGS -f /results/${OUTPUT_FILE_CSV}_p1 --shape AUDIO_SIGNAL:${AUDIO_LENGTH} --concurrency-range 1:4:1 2>&1 | tee -a $LOGNAME
set +x
echo "=== STARTING: perf client ${ARGS} --concurrency-range 8:${MAX_CONCURRENCY}:8 ==="
set -x
docker run -e DISPLAY=${DISPLAY} --runtime nvidia --rm \
--privileged --net=host \
-v ${RESULT_DIR_H}:/results --name jasper-perf-client \
${TRITON_CLIENT_CONTAINER_TAG} perf_client $ARGS -f /results/${OUTPUT_FILE_CSV}_p2 --shape AUDIO_SIGNAL:${AUDIO_LENGTH} --concurrency-range 8:${MAX_CONCURRENCY}:8 2>&1 | tee -a $LOGNAME
set +x
cat ${RESULT_DIR_H}/${OUTPUT_FILE_CSV}_p1 ${RESULT_DIR_H}/${OUTPUT_FILE_CSV}_p2 > ${RESULT_DIR_H}/${OUTPUT_FILE_CSV}
|
TensorFlow/Segmentation/UNet_Industrial/model/layers | layers | drop_layers | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model.layers.utils import _log_hparams
__all__ = ['dropout']
def dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):
layer = tf.keras.layers.Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
net = layer.apply(inputs, training=training)
_log_hparams(
classname='Dropout',
layername=net.name,
noise_shape=noise_shape,
training=training,
seed=seed,
out_shape=str(net.get_shape()),
out_dtype=net.dtype
)
return net
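# Minimal usage sketch (assumes `net` is an existing feature-map tensor and
# `is_training` a Python bool):
#   net = dropout(net, rate=0.3, training=is_training, name="drop_1")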
|
TensorFlow/Translation/GNMT/scripts | scripts | filter_dataset | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import Counter
def parse_args():
parser = argparse.ArgumentParser(description='Clean dataset')
parser.add_argument('-f1', '--file1', help='file1')
parser.add_argument('-f2', '--file2', help='file2')
return parser.parse_args()
def save_output(fname, data):
with open(fname, 'w') as f:
f.writelines(data)
def main():
"""
Discards all pairs of sentences which can't be decoded by latin-1 encoder.
It aims to filter out sentences with rare unicode glyphs and pairs which
are most likely not valid English-German sentences.
Examples of discarded sentences:
✿★★★Hommage au king de la pop ★★★✿ ✿★★★Que son âme repos...
Для их осуществления нам, прежде всего, необходимо преодолеть
возражения рыночных фундаменталистов, которые хотят ликвидировать или
уменьшить роль МВФ.
practised as a scientist in various medical departments of the ⇗Medical
University of Hanover , the ⇗University of Ulm , and the ⇗RWTH Aachen
(rheumatology, pharmacology, physiology, pathology, microbiology,
immunology and electron-microscopy).
The same shift】 and press 【】 【alt out with a smaller diameter
circle.
Brought to you by ABMSUBS ♥leira(Coordinator/Translator)
♥chibichan93(Timer/Typesetter) ♥ja...
Some examples: &0u - ☺ &0U - ☻ &tel - ☏ &PI - ¶ &SU - ☼ &cH- - ♥ &M2=♫
&sn - ﺵ SGML maps SGML to unicode.
"""
args = parse_args()
c = Counter()
skipped = 0
valid = 0
data1 = []
data2 = []
with open(args.file1) as f1, open(args.file2) as f2:
for idx, lines in enumerate(zip(f1, f2)):
line1, line2 = lines
if idx % 100000 == 1:
print('Processed {} lines'.format(idx))
try:
line1.encode('latin1')
line2.encode('latin1')
except UnicodeEncodeError:
skipped += 1
else:
data1.append(line1)
data2.append(line2)
valid += 1
c.update(line1)
ratio = valid / (skipped + valid)
print('Skipped: {}, Valid: {}, Valid ratio {}'.format(skipped, valid, ratio))
print('Character frequency:', c)
save_output(args.file1, data1)
save_output(args.file2, data2)
if __name__ == '__main__':
main()
|
TensorFlow/Segmentation/UNet_Medical/utils | utils | setup | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import dllogger as logger
import tensorflow as tf
import horovod.tensorflow as hvd
import numpy as np
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
from utils.model_fn import unet_fn
def set_flags():
os.environ['CUDA_CACHE_DISABLE'] = '1'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
def prepare_model_dir(params):
model_dir = os.path.join(params.model_dir, "model_checkpoint")
model_dir = model_dir if (hvd.rank() == 0 and not params.benchmark) else None
if model_dir is not None:
os.makedirs(model_dir, exist_ok=True)
if ('train' in params.exec_mode) and (not params.resume_training):
os.system('rm -rf {}/*'.format(model_dir))
return model_dir
def build_estimator(params, model_dir):
if params.use_amp:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
else:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
np.random.seed(params.seed)
tf.compat.v1.random.set_random_seed(params.seed)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
gpu_options = tf.compat.v1.GPUOptions()
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
if params.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
run_config = tf.estimator.RunConfig(
save_summary_steps=1,
tf_random_seed=params.seed,
session_config=config,
save_checkpoints_steps=(params.max_steps // hvd.size()) if hvd.rank() == 0 else None,
keep_checkpoint_max=1)
estimator = tf.estimator.Estimator(
model_fn=unet_fn,
model_dir=model_dir,
config=run_config,
params=params)
return estimator
def get_logger(params):
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.VERBOSE)]
if params.log_dir:
backends += [JSONStreamBackend(Verbosity.VERBOSE, params.log_dir)]
logger.init(backends=backends)
logger.metadata("eval_dice_score", {"unit": None})
logger.metadata("throughput_test", {"unit": "images/s"})
logger.metadata("throughput_train", {"unit": "images/s"})
return logger
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | dense_einsum_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based einsum layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import dense_einsum
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class DenseEinsumLayer(keras_parameterized.TestCase):
def test_3D_einsum_with_two_bound_dimensions(self):
test_layer = dense_einsum.DenseEinsum(
output_shape=(64,), num_summed_dimensions=2)
# Create a 4-dimensional input (the first dimension is implicit).
input_tensor = tf.keras.Input(shape=(None, 40, 80))
_ = test_layer(input_tensor)
self.assertEqual(test_layer._einsum_string, "abcd,cde->abe")
self.assertEqual(test_layer._kernel_shape, (40, 80, 64))
def test_3D_einsum_with_one_bound_dimensions(self):
test_layer = dense_einsum.DenseEinsum(
output_shape=(64, 32), num_summed_dimensions=1)
# Create a 3-dimensional input (the first dimension is implicit).
input_tensor = tf.keras.Input(shape=(None, 80))
_ = test_layer(input_tensor)
self.assertEqual(test_layer._einsum_string, "abc,cde->abde")
self.assertEqual(test_layer._kernel_shape, (80, 64, 32))
def test_2D_einsum_with_one_bound_dimensions(self):
test_layer = dense_einsum.DenseEinsum(
output_shape=(64,), num_summed_dimensions=1)
# Create a 3-dimensional input (the first dimension is implicit).
input_tensor = tf.keras.Input(shape=(None, 80))
_ = test_layer(input_tensor)
self.assertEqual(test_layer._einsum_string, "abc,cd->abd")
self.assertEqual(test_layer._kernel_shape, (80, 64))
def test_bias_term_can_be_disabled(self):
# A layer created using the bias should have two weights.
test_layer = dense_einsum.DenseEinsum(
output_shape=64, num_summed_dimensions=1, use_bias=True)
input_tensor = tf.keras.Input(shape=(None, 80))
_ = test_layer(input_tensor)
self.assertEqual(2, len(test_layer.get_weights()))
# A layer created without the bias should have only one weight.
test_layer = dense_einsum.DenseEinsum(
output_shape=64, num_summed_dimensions=1, use_bias=False)
input_tensor = tf.keras.Input(shape=(None, 80))
_ = test_layer(input_tensor)
self.assertEqual(1, len(test_layer.get_weights()))
def test_activation(self):
# Create a model that does not use an activation.
no_activation_layer = dense_einsum.DenseEinsum(
output_shape=64, num_summed_dimensions=1, activation=None)
input_tensor = tf.keras.Input(shape=(None, 80))
output_tensor = no_activation_layer(input_tensor)
no_activation_model = tf.keras.Model(input_tensor, output_tensor)
# Create a model that uses a softmax activation.
activation_layer = dense_einsum.DenseEinsum(
output_shape=64, num_summed_dimensions=1, activation="softmax")
input_tensor = tf.keras.Input(shape=(None, 80))
output_tensor = activation_layer(input_tensor)
activation_model = tf.keras.Model(input_tensor, output_tensor)
# Make sure the models' weights are identical.
activation_model.set_weights(no_activation_model.get_weights())
# Predict using each model on the same input data. The output should be
# different, since one is using a softmax - even though the models' weights
# are the same.
input_values = 10 * np.random.random_sample((10, 4, 80))
non_activated_data = no_activation_model.predict(input_values)
activated_data = activation_model.predict(input_values)
self.assertNotAllClose(activated_data, non_activated_data)
def test_non_iterable_output_shape(self):
test_layer = dense_einsum.DenseEinsum(
output_shape=64, num_summed_dimensions=1)
# Create a 3-dimensional input (the first dimension is implicit).
input_tensor = tf.keras.Input(shape=(None, 80))
_ = test_layer(input_tensor)
self.assertEqual(test_layer._einsum_string, "abc,cd->abd")
self.assertEqual(test_layer._kernel_shape, (80, 64))
def test_with_explicit_initializer(self):
test_layer = dense_einsum.DenseEinsum(
output_shape=(64,),
num_summed_dimensions=2,
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 4-dimensional input (the first dimension is implicit).
input_tensor = tf.keras.Input(shape=(None, 40, 80))
_ = test_layer(input_tensor)
self.assertEqual(test_layer._einsum_string, "abcd,cde->abe")
self.assertEqual(test_layer._kernel_shape, (40, 80, 64))
if __name__ == "__main__":
tf.test.main()
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc | preproc | spark_data_utils | # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from contextlib import contextmanager
from operator import itemgetter
from time import time
from pyspark import broadcast
from pyspark.sql import Row, SparkSession, Window
from pyspark.sql.functions import *
from pyspark.sql.types import *
LABEL_COL = 0
INT_COLS = list(range(1, 14))
CAT_COLS = list(range(14, 40))
def get_column_counts_with_frequency_limit(df, frequency_limit = None):
cols = ['_c%d' % i for i in CAT_COLS]
df = (df
.select(posexplode(array(*cols)))
.withColumnRenamed('pos', 'column_id')
.withColumnRenamed('col', 'data')
.filter('data is not null')
.groupBy('column_id', 'data')
.count())
if frequency_limit:
frequency_limit = frequency_limit.split(",")
exclude = []
default_limit = None
for fl in frequency_limit:
frequency_pair = fl.split(":")
if len(frequency_pair) == 1:
default_limit = int(frequency_pair[0])
elif len(frequency_pair) == 2:
df = df.filter((col('column_id') != int(frequency_pair[0]) - CAT_COLS[0]) | (col('count') >= int(frequency_pair[1])))
exclude.append(int(frequency_pair[0]))
if default_limit:
remain = [x - CAT_COLS[0] for x in CAT_COLS if x not in exclude]
df = df.filter((~col('column_id').isin(remain)) | (col('count') >= default_limit))
# for comparing isin and separate filter
# for i in remain:
# df = df.filter((col('column_id') != i - CAT_COLS[0]) | (col('count') >= default_limit))
return df
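# Illustrative example (limits are hypothetical): frequency_limit = "15:8,20:4,3"
# keeps categories of column _c15 seen at least 8 times and of _c20 seen at
# least 4 times, and applies the bare default limit of 3 to every other
# categorical column.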
def assign_id_with_window(df):
windowed = Window.partitionBy('column_id').orderBy(desc('count'))
return (df
.withColumn('id', row_number().over(windowed))
.withColumnRenamed('count', 'model_count'))
def assign_low_mem_partial_ids(df):
# To avoid some scaling issues with a simple window operation, we use a more complex method
# to compute the same thing, but in a more distributed spark specific way
df = df.orderBy(asc('column_id'), desc('count'))
# monotonically_increasing_id() stores the partition id in the top 31 bits and an
# increasing count of the rows within that partition in the lower bits. So we split
# it into two parts: the partition id part_id and the count mono_id
df = df.withColumn('part_id', spark_partition_id())
return df.withColumn('mono_id', monotonically_increasing_id() - shiftLeft(col('part_id'), 33))
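# Illustrative example: the third row of spark partition 2 receives
# monotonically_increasing_id() == (2 << 33) + 2, so part_id == 2 and
# mono_id == ((2 << 33) + 2) - (2 << 33) == 2, a 0-based row count within
# that partition.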
def assign_low_mem_final_ids(df):
# Now we can find the minimum and maximum mono_ids within a given column/partition pair
sub_model = df.groupBy('column_id', 'part_id').agg(max('mono_id').alias('top'), min('mono_id').alias('bottom'))
sub_model = sub_model.withColumn('diff', col('top') - col('bottom') + 1)
sub_model = sub_model.drop('top')
# This window function runs over the aggregated column/partition pair table.
# It does a running sum of the rows within each column.
windowed = Window.partitionBy('column_id').orderBy('part_id').rowsBetween(Window.unboundedPreceding, -1)
sub_model = sub_model.withColumn('running_sum', sum('diff').over(windowed)).na.fill(0, ["running_sum"])
joined = df.withColumnRenamed('column_id', 'i_column_id')
joined = joined.withColumnRenamed('part_id', 'i_part_id')
joined = joined.withColumnRenamed('count', 'model_count')
# Then we can join the original input with the pair it is a part of
joined = joined.join(sub_model, (col('i_column_id') == col('column_id')) & (col('part_id') == col('i_part_id')))
# So with all that we can subtract bottom from mono_id, making it start at 0 for
# each partition, and then add in the running_sum so the id is contiguous and
# unique for the entire column. The + 1 makes it match the 1-based indexing of
# row_number.
ret = joined.select(col('column_id'),
col('data'),
(col('mono_id') - col('bottom') + col('running_sum') + 1).cast(IntegerType()).alias('id'),
col('model_count'))
return ret
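# Illustrative example: if one column's counts land on three partitions
# holding 3, 5 and 2 rows, running_sum is 0, 3 and 8 respectively, so the
# assigned ids are 1..3, 4..8 and 9..10 -- contiguous, unique within the
# column and 1-based, matching what row_number() would have produced.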
def get_column_models(combined_model):
for i in CAT_COLS:
model = (combined_model
.filter('column_id == %d' % (i - CAT_COLS[0]))
.drop('column_id'))
yield i, model
def col_of_rand_long():
return (rand() * (1 << 52)).cast(LongType())
def skewed_join(df, model, col_name, cutoff):
# Most versions of spark don't have a good way to deal with a skewed join
# out of the box; some do, and this could be replaced with one of those.
# Because we have statistics about the skewness that we can use, we divide
# the model up into two parts. One part is the highly skewed part: we do a
# broadcast join for that part, but keep the result in a separate column.
b_model = broadcast(model.filter(col('model_count') >= cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(b_model, col_name, how='left')
.withColumnRenamed('id', 'id_tmp'))
# We also need to spread the matched (skewed) data evenly. We will use a
# source of randomness for this, but use a -1 for anything that still
# needs to be matched
if 'ordinal' in df.columns:
rand_column = col('ordinal')
else:
rand_column = col_of_rand_long()
df = df.withColumn('join_rand',
# null values are not in the model, they are filtered out
# but can be a source of skewness so include them in
# the even distribution
when(col('id_tmp').isNotNull() | col(col_name).isNull(), rand_column)
.otherwise(lit(-1)))
# Null out the string data that already matched to save memory
df = df.withColumn(col_name,
when(col('id_tmp').isNotNull(), None)
.otherwise(col(col_name)))
# Now do the second join, which will be a non broadcast join.
# Sadly spark is too smart for its own good and will optimize out
# joining on a column it knows will always be a constant value.
# So we have to make a convoluted version of assigning a -1 to the
# randomness column for the model itself to work around that.
nb_model = (model
.withColumn('join_rand', when(col('model_count') < cutoff, lit(-1)).otherwise(lit(-2)))
.filter(col('model_count') < cutoff)
.withColumnRenamed('data', col_name)
.drop('model_count'))
df = (df
.join(nb_model, ['join_rand', col_name], how='left')
.drop(col_name, 'join_rand')
# Pick either join result as an answer
.withColumn(col_name, coalesce(col('id'), col('id_tmp')))
.drop('id', 'id_tmp'))
return df
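# Illustrative example (sizes are hypothetical): with cutoff = 1000, a
# category seen 50000 times is resolved by the broadcast join above, while
# the long tail of rarer categories goes through the shuffle join, with
# join_rand spreading the already-matched skewed rows evenly across
# partitions.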
def apply_models(df, models, broadcast_model = False, skew_broadcast_pct = 1.0):
# Sort the models so broadcast joins come first, so we reduce the amount of
# shuffle data sooner rather than later. If we parsed the string hex values
# to ints early on this would not make a difference.
models = sorted(models, key=itemgetter(3), reverse=True)
for i, model, original_rows, would_broadcast in models:
col_name = '_c%d' % i
if not (would_broadcast or broadcast_model):
# The data is highly skewed so we need to offset that
cutoff = int(original_rows * skew_broadcast_pct/100.0)
df = skewed_join(df, model, col_name, cutoff)
else:
# broadcast joins can handle skewed data so no need to
# do anything special
model = (model.drop('model_count')
.withColumnRenamed('data', col_name))
model = broadcast(model) if broadcast_model else model
df = (df
.join(model, col_name, how='left')
.drop(col_name)
.withColumnRenamed('id', col_name))
return df.fillna(0, ['_c%d' % i for i in CAT_COLS])
def transform_log(df, transform_log = False):
cols = ['_c%d' % i for i in INT_COLS]
if transform_log:
for col_name in cols:
df = df.withColumn(col_name, log(df[col_name] + 3))
return df.fillna(0, cols)
def would_broadcast(spark, str_path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(str_path)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
stat = fs.listFiles(path, True)
sum = 0
while stat.hasNext():
sum = sum + stat.next().getLen()
sql_conf = sc._jvm.org.apache.spark.sql.internal.SQLConf()
cutoff = sql_conf.autoBroadcastJoinThreshold() * sql_conf.fileCompressionFactor()
return sum <= cutoff
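# Illustrative: with the default spark.sql.autoBroadcastJoinThreshold of
# 10485760 bytes (10MB) and a fileCompressionFactor of 1.0, a model whose
# files total at most 10MB is considered broadcastable.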
def delete_data_source(spark, path):
sc = spark.sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(path)
sc._jvm.org.apache.hadoop.fs.FileSystem.get(config).delete(path, True)
def load_raw(spark, folder, day_range):
label_fields = [StructField('_c%d' % LABEL_COL, IntegerType())]
int_fields = [StructField('_c%d' % i, IntegerType()) for i in INT_COLS]
str_fields = [StructField('_c%d' % i, StringType()) for i in CAT_COLS]
schema = StructType(label_fields + int_fields + str_fields)
paths = [os.path.join(folder, 'day_%d' % i) for i in day_range]
return (spark
.read
.schema(schema)
.option('sep', '\t')
.csv(paths))
def rand_ordinal(df):
# create a random long from the double precision float.
# The fraction part of a double is 52 bits, so we try to capture as much
# of that as possible
return df.withColumn('ordinal', col_of_rand_long())
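# Illustrative: rand() yields a double in [0, 1), so col_of_rand_long()
# produces a roughly uniform integer in [0, 2**52), enough entropy to
# shard and shuffle rows.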
def day_from_ordinal(df, num_days):
return df.withColumn('day', (col('ordinal') % num_days).cast(IntegerType()))
def day_from_input_file(df):
return df.withColumn('day', substring_index(input_file_name(), '_', -1).cast(IntegerType()))
def pseudo_sort_by_day_plus(spark, df, num_days):
# Sort is very expensive because it needs to calculate the partitions
# which in our case may involve rereading all of the data. In some cases
# we can avoid this by repartitioning the data and sorting within a single partition
shuffle_parts = int(spark.conf.get('spark.sql.shuffle.partitions'))
extra_parts = int(shuffle_parts/num_days)
if extra_parts <= 0:
df = df.repartition('day')
else:
# We want to spread out the computation to about the same amount as shuffle_parts
divided = (col('ordinal') / num_days).cast(LongType())
extra_ident = divided % extra_parts
df = df.repartition(col('day'), extra_ident)
return df.sortWithinPartitions('day', 'ordinal')
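# Illustrative example: with spark.sql.shuffle.partitions = 200 and
# num_days = 24, extra_parts = 8, so rows spread over day x 8 buckets
# (up to 192 partitions) and are then sorted locally, avoiding a global sort.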
def load_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
return spark.read.parquet(path)
def save_combined_model(df, model_folder, mode=None):
path = os.path.join(model_folder, 'combined.parquet')
df.write.parquet(path, mode=mode)
def delete_combined_model(spark, model_folder):
path = os.path.join(model_folder, 'combined.parquet')
delete_data_source(spark, path)
def load_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
return spark.read.parquet(path)
def save_low_mem_partial_ids(df, model_folder, mode=None):
path = os.path.join(model_folder, 'partial_ids.parquet')
df.write.parquet(path, mode=mode)
def delete_low_mem_partial_ids(spark, model_folder):
path = os.path.join(model_folder, 'partial_ids.parquet')
delete_data_source(spark, path)
def load_column_models(spark, model_folder, count_required):
for i in CAT_COLS:
path = os.path.join(model_folder, '%d.parquet' % i)
df = spark.read.parquet(path)
if count_required:
values = df.agg(sum('model_count').alias('sum'), count('*').alias('size')).collect()
else:
values = df.agg(sum('model_count').alias('sum')).collect()
yield i, df, values[0], would_broadcast(spark, path)
def save_column_models(column_models, model_folder, mode=None):
for i, model in column_models:
path = os.path.join(model_folder, '%d.parquet' % i)
model.write.parquet(path, mode=mode)
def save_model_size(model_size, path, write_mode):
if os.path.exists(path) and write_mode == 'errorifexists':
print('Error: model size file %s exists' % path)
sys.exit(1)
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'w') as fp:
json.dump(model_size, fp, indent=4)
_benchmark = {}
@contextmanager
def _timed(step):
start = time()
yield
end = time()
_benchmark[step] = end - start
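# Example usage (mirrors _main below):
#   with _timed('transform'):
#       ...  # timed work
# _benchmark['transform'] then holds the elapsed wall-clock seconds.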
def _parse_args():
parser = ArgumentParser()
parser.add_argument(
'--mode',
required=True,
choices=['generate_models', 'transform'])
parser.add_argument('--days', required=True)
parser.add_argument('--input_folder', required=True)
parser.add_argument('--output_folder')
parser.add_argument('--model_size_file')
parser.add_argument('--model_folder', required=True)
parser.add_argument(
'--write_mode',
choices=['overwrite', 'errorifexists'],
default='errorifexists')
parser.add_argument('--frequency_limit')
parser.add_argument('--no_numeric_log_col', action='store_true')
#Support for running in a lower memory environment
parser.add_argument('--low_mem', action='store_true')
parser.add_argument(
'--output_ordering',
choices=['total_random', 'day_random', 'any', 'input'],
default='total_random')
parser.add_argument(
'--output_partitioning',
choices=['day', 'none'],
default='none')
parser.add_argument('--dict_build_shuffle_parallel_per_day', type=int, default=2)
parser.add_argument('--apply_shuffle_parallel_per_day', type=int, default=25)
parser.add_argument('--skew_broadcast_pct', type=float, default=1.0)
parser.add_argument('--debug_mode', action='store_true')
args = parser.parse_args()
start, end = args.days.split('-')
args.day_range = list(range(int(start), int(end) + 1))
args.days = len(args.day_range)
return args
def _main():
args = _parse_args()
spark = SparkSession.builder.getOrCreate()
df = load_raw(spark, args.input_folder, args.day_range)
if args.mode == 'generate_models':
spark.conf.set('spark.sql.shuffle.partitions', args.days * args.dict_build_shuffle_parallel_per_day)
with _timed('generate models'):
col_counts = get_column_counts_with_frequency_limit(df, args.frequency_limit)
if args.low_mem:
# In low memory mode we have to save an intermediate result because, if we
# try to do it in one query, spark ends up assigning the partial ids in two
# different locations that are not guaranteed to line up. This prevents
# that from happening by assigning the partial ids and then writing them
# out.
save_low_mem_partial_ids(
assign_low_mem_partial_ids(col_counts),
args.model_folder,
args.write_mode)
save_combined_model(
assign_low_mem_final_ids(load_low_mem_partial_ids(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_low_mem_partial_ids(spark, args.model_folder)
else:
save_combined_model(
assign_id_with_window(col_counts),
args.model_folder,
args.write_mode)
save_column_models(
get_column_models(load_combined_model(spark, args.model_folder)),
args.model_folder,
args.write_mode)
if not args.debug_mode:
delete_combined_model(spark, args.model_folder)
if args.mode == 'transform':
spark.conf.set('spark.sql.shuffle.partitions', args.days * args.apply_shuffle_parallel_per_day)
with _timed('transform'):
if args.output_ordering == 'total_random':
df = rand_ordinal(df)
if args.output_partitioning == 'day':
df = day_from_ordinal(df, args.days)
elif args.output_ordering == 'day_random':
df = rand_ordinal(df)
df = day_from_input_file(df)
elif args.output_ordering == 'input':
df = df.withColumn('ordinal', monotonically_increasing_id())
if args.output_partitioning == 'day':
df = day_from_input_file(df)
else: # any ordering
if args.output_partitioning == 'day':
df = day_from_input_file(df)
models = list(load_column_models(spark, args.model_folder, bool(args.model_size_file)))
if args.model_size_file:
save_model_size(
OrderedDict(('_c%d' % i, agg.size) for i, _, agg, _ in models),
args.model_size_file,
args.write_mode)
models = [(i, df, agg.sum, flag) for i, df, agg, flag in models]
df = apply_models(
df,
models,
not args.low_mem,
args.skew_broadcast_pct)
df = transform_log(df, not args.no_numeric_log_col)
if args.output_partitioning == 'day':
partitionBy = 'day'
else:
partitionBy = None
if args.output_ordering == 'total_random':
if args.output_partitioning == 'day':
df = pseudo_sort_by_day_plus(spark, df, args.days)
else: # none
# Don't do a full sort; it is expensive. The desired order is random,
# so just distribute the rows randomly.
df = df.repartition('ordinal').sortWithinPartitions('ordinal')
df = df.drop('ordinal')
elif args.output_ordering == 'day_random':
df = pseudo_sort_by_day_plus(spark, df, args.days)
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
elif args.output_ordering == 'input':
if args.low_mem:
# This is the slowest option. We totally messed up the order so we have to put
# it back in the correct order
df = df.orderBy('ordinal')
else:
# Applying the dictionary happened within a single task so we are already really
# close to the correct order, just need to sort within the partition
df = df.sortWithinPartitions('ordinal')
df = df.drop('ordinal')
if args.output_partitioning != 'day':
df = df.drop('day')
# else: any ordering so do nothing the ordering does not matter
df.write.parquet(
args.output_folder,
mode=args.write_mode,
partitionBy=partitionBy)
print('=' * 100)
print(_benchmark)
if __name__ == '__main__':
_main()
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | inception | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Brings all inception models under one namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from nets.inception_resnet_v2 import inception_resnet_v2
from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
from nets.inception_resnet_v2 import inception_resnet_v2_base
from nets.inception_v1 import inception_v1
from nets.inception_v1 import inception_v1_arg_scope
from nets.inception_v1 import inception_v1_base
from nets.inception_v2 import inception_v2
from nets.inception_v2 import inception_v2_arg_scope
from nets.inception_v2 import inception_v2_base
from nets.inception_v3 import inception_v3
from nets.inception_v3 import inception_v3_arg_scope
from nets.inception_v3 import inception_v3_base
from nets.inception_v4 import inception_v4
from nets.inception_v4 import inception_v4_arg_scope
from nets.inception_v4 import inception_v4_base
# pylint: enable=unused-import
|
PyTorch/SpeechSynthesis/FastPitch/platform | platform | DGX1_FastPitch_FP32_4GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=4}
: ${BATCH_SIZE:=16}
: ${GRAD_ACCUMULATION:=4}
: ${AMP:=false}
bash scripts/train.sh "$@"
|
TensorFlow/LanguageModeling/BERT/scripts | scripts | run_glue | #!/usr/bin/env bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Container nvidia build = " $NVIDIA_BUILD_ID
task_name=${1:-"MRPC"}
batch_size=${2:-"16"}
learning_rate=${3:-"3e-6"}
precision=${4:-"fp16"}
use_xla=${5:-"true"}
num_gpu=${6:-"8"}
seq_length=${7:-"128"}
doc_stride=${8:-"64"}
bert_model=${9:-"large"}
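# Illustrative invocation (positional arguments map to the defaults above; the
# script path is an assumption): bash scripts/run_glue.sh MRPC 16 3e-6 fp16 true 8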
if [ "$bert_model" = "large" ] ; then
export BERT_DIR=data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb
else
export BERT_DIR=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12
fi
export GLUE_DIR=data/download
epochs=${10:-"3.0"}
ws=${11:-"0.1"}
init_checkpoint=${12:-"$BERT_DIR/model.ckpt"}
echo "GLUE directory set as " $GLUE_DIR " BERT directory set as " $BERT_DIR
use_fp16=""
if [ "$precision" = "fp16" ] ; then
echo "fp16 activated!"
use_fp16="--amp"
else
echo "fp32/tf32 activated!"
use_fp16="--noamp"
fi
if [ "$use_xla" = "true" ] ; then
use_xla_tag="--use_xla"
echo "XLA activated"
else
use_xla_tag="--nouse_xla"
fi
if [ $num_gpu -gt 1 ] ; then
mpi_command="mpirun -np $num_gpu -H localhost:$num_gpu \
--allow-run-as-root -bind-to none -map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH -mca pml ob1 -mca btl ^openib"
else
mpi_command=""
fi
export GBS=$(expr $batch_size \* $num_gpu)
printf -v TAG "tf_bert_finetuning_glue_%s_%s_%s_gbs%d" "$task_name" "$bert_model" "$precision" $GBS
DATESTAMP=`date +'%y%m%d%H%M%S'`
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=/results/${TAG}_${DATESTAMP}
LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log
mkdir -m 777 -p $RESULTS_DIR
printf "Saving checkpoints to %s\n" "$RESULTS_DIR"
printf "Logs written to %s\n" "$LOGFILE"
#Check if all necessary files are available before training
for DIR_or_file in $GLUE_DIR/${task_name} $RESULTS_DIR $BERT_DIR/vocab.txt $BERT_DIR/bert_config.json; do
echo $DIR_or_file
if [ ! -d "$DIR_or_file" ] && [ ! -f "$DIR_or_file" ]; then
echo "Error! $DIR_or_file directory missing. Please mount correctly"
exit -1
fi
done
$mpi_command python run_classifier.py \
--task_name=$task_name \
--do_train=true \
--do_eval=true \
--data_dir=$GLUE_DIR/$task_name \
--vocab_file=$BERT_DIR/vocab.txt \
--bert_config_file=$BERT_DIR/bert_config.json \
--init_checkpoint=$init_checkpoint \
--max_seq_length=$seq_length \
--doc_stride=$doc_stride \
--train_batch_size=$batch_size \
--learning_rate=$learning_rate \
--num_train_epochs=$epochs \
--output_dir=$RESULTS_DIR \
--horovod "$use_fp16" \
$use_xla_tag --warmup_proportion=$ws |& tee $LOGFILE |
TensorFlow/Classification/ConvNets/triton | triton | requirements | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
networkx==2.5
onnx>=1.8.0
onnxruntime>=1.9.0
pycuda>=2019.1.2
PyYAML>=5.2
tqdm>=4.44.1
tf2onnx==1.8.3
tabulate>=0.8.7
natsort>=7.0.0
# use tags instead of branch names - a Docker cache hit could otherwise prevent fetching the most recent changes on a branch
model_navigator @ git+https://github.com/triton-inference-server/[email protected]#egg=model_navigator
|
TensorFlow2/LanguageModeling/BERT/data | data | PubMedTextFormatting | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import pubmed_parser as pmp
class PubMedTextFormatting:
def __init__(self, pubmed_path, output_filename, recursive=False):
self.pubmed_path = pubmed_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one article per line
def merge(self):
print('PubMed path:', self.pubmed_path)
with open(self.output_filename, mode='w', newline='\n') as ofile:
for filename in glob.glob(self.pubmed_path + '/*.xml*', recursive=self.recursive):
print('file:', filename)
dicts_out = pmp.parse_medline_xml(filename)
for dict_out in dicts_out:
if not dict_out['abstract']:
continue
try:
for line in dict_out['abstract'].splitlines():
if len(line) < 30:
continue
ofile.write(line.strip() + " ")
ofile.write("\n\n")
except Exception:
ofile.write("\n\n")
continue
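# Hedged usage sketch (paths and filenames are illustrative, not from this repo):
# formatter = PubMedTextFormatting('/workspace/pubmed', 'pubmed_corpus.txt', recursive=True)
# formatter.merge()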
|
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp32/feature-extractor-ts-trace | feature-extractor-ts-trace | config | name: "feature-extractor-ts-trace"
platform: "pytorch_libtorch"
default_model_filename: "model.pt"
max_batch_size: 64
input [
{
name: "input__0"
data_type: TYPE_FP32
dims: [ -1 ]
},
{
name: "input__1"
data_type: TYPE_INT32
dims: [ 1 ]
reshape { shape: [] }
}
]
output [
{
name: "output__0"
data_type: TYPE_FP32
dims: [ 64, -1 ]
},
{
name: "output__1"
data_type: TYPE_INT32
dims: [ 1 ]
reshape { shape: [] }
}
]
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | bert_pretrainer | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trainer network for BERT-style models."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import copy
import tensorflow as tf
from official.nlp.modeling import networks
@tf.keras.utils.register_keras_serializable(package='Text')
class BertPretrainer(tf.keras.Model):
"""BERT network training model.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertTrainer allows a user to pass in a transformer stack, and instantiates
the masked language model and classification networks that are used to create
the training objectives.
Attributes:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a "get_embedding_table" method.
num_classes: Number of classes to predict from the classification network.
num_token_predictions: Number of tokens to predict from the masked LM.
activation: The activation (if any) to use in the masked LM and
classification networks. If None, no activation will be used.
initializer: The initializer (if any) to use in the masked LM and
classification networks. Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
network,
num_classes,
num_token_predictions,
float_type,
activation=None,
output_activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
self._self_setattr_tracking = False
self._config = {
'network': network,
'num_classes': num_classes,
'num_token_predictions': num_token_predictions,
'activation': activation,
'output_activation': output_activation,
'initializer': initializer,
'output': output,
}
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a copy of the network inputs for use
# when we construct the Model object at the end of init. (We keep a copy
# because we'll be adding another tensor to the copy later.)
network_inputs = network.inputs
inputs = copy.copy(network_inputs)
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
# Note that, because of how deferred construction happens, we can't use
# the copy of the list here - by the time the network is invoked, the list
# object contains the additional input added below.
sequence_output, cls_output = network(network_inputs)
sequence_output_length = sequence_output.shape.as_list()[1]
if sequence_output_length < num_token_predictions:
raise ValueError(
"The passed network's output length is %s, which is less than the "
'requested num_token_predictions %s.' %
(sequence_output_length, num_token_predictions))
masked_lm_positions = tf.keras.layers.Input(
shape=(num_token_predictions,),
name='masked_lm_positions',
dtype=tf.int32)
inputs.append(masked_lm_positions)
self.masked_lm = networks.MaskedLM(
num_predictions=num_token_predictions,
input_width=sequence_output.shape[-1],
source_network=network,
float_type=float_type,
activation=activation,
initializer=initializer,
output=output,
name='masked_lm')
lm_outputs = self.masked_lm([sequence_output, masked_lm_positions])
self.classification = networks.Classification(
input_width=cls_output.shape[-1],
num_classes=num_classes,
initializer=initializer,
output=output,
name='classification')
sentence_outputs = self.classification(cls_output)
super(BertPretrainer, self).__init__(
inputs=inputs, outputs=[lm_outputs, sentence_outputs], **kwargs)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
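# Hedged usage sketch (assumes an encoder from official.nlp.modeling.networks that
# exposes get_embedding_table(), e.g. a TransformerEncoder; all values illustrative):
# encoder = networks.TransformerEncoder(vocab_size=30522, num_layers=12)
# pretrainer = BertPretrainer(network=encoder, num_classes=2,
#                             num_token_predictions=20, float_type=tf.float32)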
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | finetune_base_100h | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# A100 80GiB FP16: UPDATE_FREQ=1
# A100 80GiB TF32: UPDATE_FREQ=1
# IO
: ${DATASET_DIR:="/datasets/LibriSpeech"}
: ${TRAIN_SUBSET:="train-clean-100"}
: ${OUTPUT_DIR:="results/finetune_base_100h"}
: ${PRETRAINED_MODEL:=results/finetune_base/wav2vec2_update400000.pt}
# Batching
: ${NUM_GPUS:=8}
: ${MAX_TOKENS:=3200000}
: ${NUM_CONCAT_BATCHES:=1}
: ${UPDATE_FREQ:=1}
# Training
: ${LEARNING_RATE:=0.00003}
: ${FREEZE_FINETUNE_UPDATES:=0}
: ${MAX_UPDATE:=80000}
: ${MASK_CHANNEL_PROB:=0.5}
: ${MASK_PROB:=0.65}
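# Illustrative invocation (any variable above can be overridden from the shell):
#   NUM_GPUS=8 DATASET_DIR=/datasets/LibriSpeech bash scripts/finetune_base_100h.sh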
bash scripts/finetune_vox_960h.sh "$@"
|
PyTorch/SpeechRecognition/Jasper/jasper | jasper | model | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
activations = {
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
}
def init_weights(m, mode='xavier_uniform'):
if type(m) == nn.Conv1d or type(m) == MaskedConv1d:
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif type(m) == nn.BatchNorm1d:
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
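# E.g. kernel_size=11, stride=1, dilation=1 -> padding=5, which preserves the
# input length for odd kernel sizes ("same" padding).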
def get_same_padding(kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (kernel_size // 2) * dilation
class MaskedConv1d(nn.Conv1d):
"""1D convolution with sequence masking
"""
__constants__ = ["masked"]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, masked=True):
super(MaskedConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.masked = masked
def get_seq_len(self, lens):
# rounding_mode not available in 20.10 container
# return torch.div((lens + 2 * self.padding[0] - self.dilation[0]
# * (self.kernel_size[0] - 1) - 1), self.stride[0], rounding_mode="floor") + 1
return torch.floor((lens + 2 * self.padding[0] - self.dilation[0]
* (self.kernel_size[0] - 1) - 1) / self.stride[0]).long() + 1
def forward(self, x, x_lens=None):
if self.masked:
max_len = x.size(2)
idxs = torch.arange(max_len, dtype=x_lens.dtype, device=x_lens.device)
mask = idxs.expand(x_lens.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)
x_lens = self.get_seq_len(x_lens)
return super(MaskedConv1d, self).forward(x), x_lens
class JasperBlock(nn.Module):
"""Jasper Block. See https://arxiv.org/pdf/1904.03288.pdf
"""
__constants__ = ["use_conv_masks"]
def __init__(self, infilters, filters, repeat=3, kernel_size=11, stride=1,
dilation=1, padding='same', dropout=0.2, activation=None,
residual=True, residual_panes=[], use_conv_masks=False):
super(JasperBlock, self).__init__()
assert padding == "same", "Only 'same' padding is supported."
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
self.use_conv_masks = use_conv_masks
self.conv = nn.ModuleList()
for i in range(repeat):
self.conv.extend(self._conv_bn(infilters if i == 0 else filters,
filters,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val))
if i < repeat - 1:
self.conv.extend(self._act_dropout(dropout, activation))
self.res = nn.ModuleList() if residual else None
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
if len(residual_panes) == 0:
res_panes = [infilters]
self.dense_residual = False
for ip in res_panes:
self.res.append(nn.ModuleList(
self._conv_bn(ip, filters, kernel_size=1)))
self.out = nn.Sequential(*self._act_dropout(dropout, activation))
def _conv_bn(self, in_channels, out_channels, **kw):
return [MaskedConv1d(in_channels, out_channels,
masked=self.use_conv_masks, **kw),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)]
def _act_dropout(self, dropout=0.2, activation=None):
return [activation or nn.Hardtanh(min_val=0.0, max_val=20.0),
nn.Dropout(p=dropout)]
def forward(self, xs, xs_lens=None):
if not self.use_conv_masks:
xs_lens = 0
# forward convolutions
out = xs[-1]
lens = xs_lens
for i, l in enumerate(self.conv):
if isinstance(l, MaskedConv1d):
out, lens = l(out, lens)
else:
out = l(out)
# residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if j == 0:  # only the first res layer (MaskedConv1d) consumes sequence lengths
res_out, _ = res_layer(res_out, xs_lens)
else:
res_out = res_layer(res_out)
out += res_out
# output
out = self.out(out)
if self.res is not None and self.dense_residual:
out = xs + [out]
else:
out = [out]
if self.use_conv_masks:
return out, lens
else:
return out, None
class JasperEncoder(nn.Module):
__constants__ = ["use_conv_masks"]
def __init__(self, in_feats, activation, frame_splicing=1,
init='xavier_uniform', use_conv_masks=False, blocks=[]):
super(JasperEncoder, self).__init__()
self.use_conv_masks = use_conv_masks
self.layers = nn.ModuleList()
in_feats *= frame_splicing
all_residual_panes = []
for i, blk in enumerate(blocks):
blk['activation'] = activations[activation]()
has_residual_dense = blk.pop('residual_dense', False)
if has_residual_dense:
all_residual_panes += [in_feats]
blk['residual_panes'] = all_residual_panes
else:
blk['residual_panes'] = []
self.layers.append(
JasperBlock(in_feats, use_conv_masks=use_conv_masks, **blk))
in_feats = blk['filters']
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, x, x_lens=None):
out, out_lens = [x], x_lens
for l in self.layers:
out, out_lens = l(out, out_lens)
return out, out_lens
class JasperDecoderForCTC(nn.Module):
def __init__(self, in_feats, n_classes, init='xavier_uniform'):
super(JasperDecoderForCTC, self).__init__()
self.layers = nn.Sequential(
nn.Conv1d(in_feats, n_classes, kernel_size=1, bias=True),)
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, enc_out):
out = self.layers(enc_out[-1]).transpose(1, 2)
return F.log_softmax(out, dim=2)
class GreedyCTCDecoder(nn.Module):
@torch.no_grad()
def forward(self, log_probs, log_prob_lens=None):
if log_prob_lens is not None:
max_len = log_probs.size(1)
idxs = torch.arange(max_len, dtype=log_prob_lens.dtype,
device=log_prob_lens.device)
mask = idxs.unsqueeze(0) >= log_prob_lens.unsqueeze(1)
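# Give the blank symbol (the last class) +Inf log-prob at padded positions
# so that argmax emits blank there.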
log_probs[:,:,-1] = log_probs[:,:,-1].masked_fill(mask, float("Inf"))
return log_probs.argmax(dim=-1, keepdim=False).int()
class Jasper(nn.Module):
def __init__(self, encoder_kw, decoder_kw, transpose_in=False):
super(Jasper, self).__init__()
self.transpose_in = transpose_in
self.encoder = JasperEncoder(**encoder_kw)
self.decoder = JasperDecoderForCTC(**decoder_kw)
def forward(self, x, x_lens=None):
if self.encoder.use_conv_masks:
assert x_lens is not None
enc, enc_lens = self.encoder(x, x_lens)
out = self.decoder(enc)
return out, enc_lens
else:
if self.transpose_in:
x = x.transpose(1, 2)
enc, _ = self.encoder(x)
out = self.decoder(enc)
return out # torchscript refuses to output None
# TODO Explicitly add x_lens=None for inference (now x can be a Tensor or tuple)
def infer(self, x, x_lens=None):
if self.encoder.use_conv_masks:
return self.forward(x, x_lens)
else:
ret = self.forward(x)
return ret, len(ret)
class CTCLossNM:
def __init__(self, n_classes):
self._criterion = nn.CTCLoss(blank=n_classes-1, reduction='none')
def __call__(self, log_probs, targets, input_length, target_length):
input_length = input_length.long()
target_length = target_length.long()
targets = targets.long()
loss = self._criterion(log_probs.transpose(1, 0), targets, input_length,
target_length)
# note that this is different from reduction = 'mean'
# because we are not dividing by target lengths
return torch.mean(loss)
|
PyTorch/Forecasting/TFT | TFT | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch.distributed as dist
import torch
class PerformanceMeter():
def __init__(self, benchmark_mode=True):
self.benchmark_mode = benchmark_mode
self.reset()
def reset(self):
if self.benchmark_mode:
torch.cuda.synchronize()
self.avg = 0
self.count = 0
self.total_time = 0
self.last_update_time = time.time()
self.intervals = []
def update(self, n, exclude_from_total=False):
if self.benchmark_mode:
torch.cuda.synchronize()
delta = time.time() - self.last_update_time
self.intervals.append(delta)
if not exclude_from_total:
self.total_time += delta
self.count += n
self.avg = self.count / self.total_time
self.last_update_time = time.time()
return n/delta
def reset_current_lap(self):
if self.benchmark_mode:
torch.cuda.synchronize()
self.last_update_time = time.time()
def p(self, i):
assert i <= 100
# Clamp the index so that p(100) returns the maximum instead of indexing out of range
idx = min(int(len(self.intervals) * i / 100), len(self.intervals) - 1)
return sorted(self.intervals)[idx]
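# Hedged usage sketch (names illustrative):
# meter = PerformanceMeter()
# items_per_sec = meter.update(batch_size)  # call once per processed batch
# print(meter.avg, meter.p(90))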
def print_once(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*args, **kwargs)
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | masked_lm | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Masked language model network."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Text')
class MaskedLM(tf.keras.Model):
"""Masked language model network head for BERT modeling.
This network implements a masked language model based on the provided network.
It assumes that the network being passed has a "get_embedding_table()" method.
Attributes:
input_width: The innermost dimension of the input tensor to this network.
num_predictions: The number of predictions to make per sequence.
source_network: The network with the embedding layer to use for the
embedding layer.
activation: The activation, if any, for the dense layer in this network.
initializer: The initializer for the dense layer in this network. Defaults to
a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
input_width,
num_predictions,
source_network,
float_type,
activation=None,
initializer='glorot_uniform',
output='logits',
**kwargs):
embedding_table = source_network.get_embedding_table()
vocab_size, hidden_size = embedding_table.shape
sequence_data = tf.keras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=tf.float32)
masked_lm_positions = tf.keras.layers.Input(
shape=(num_predictions,), name='masked_lm_positions', dtype=tf.int32)
masked_lm_input = tf.keras.layers.Lambda(
lambda x: self._gather_indexes(x[0], x[1]))(
[sequence_data, masked_lm_positions])
lm_data = (
tf.keras.layers.Dense(
hidden_size,
activation=activation,
kernel_initializer=initializer,
name='cls/predictions/transform/dense')(masked_lm_input))
lm_data = tf.keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name='cls/predictions/transform/LayerNorm')(
lm_data)
lm_data = tf.keras.layers.Lambda(
lambda x: tf.matmul(x, tf.cast(embedding_table, float_type), transpose_b=True))(
lm_data)
logits = Bias(
initializer=tf.keras.initializers.Zeros(),
name='cls/predictions/output_bias')(
lm_data)
# We can't use the standard Keras reshape layer here, since it expects
# the input and output batch size to be the same.
reshape_layer = tf.keras.layers.Lambda(
lambda x: tf.reshape(x, [-1, num_predictions, vocab_size]))
self.logits = reshape_layer(logits)
predictions = tf.keras.layers.Activation(tf.nn.log_softmax, dtype='float32')(self.logits)
if output == 'logits':
output_tensors = self.logits
elif output == 'predictions':
output_tensors = predictions
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super(MaskedLM, self).__init__(
inputs=[sequence_data, masked_lm_positions],
outputs=output_tensors,
**kwargs)
def get_config(self):
raise NotImplementedError('MaskedLM cannot be directly serialized at this '
'time. Please use it only in Layers or '
'functionally subclassed Models/Networks.')
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions.
Args:
sequence_tensor: Sequence output of `BertModel` layer of shape
(`batch_size`, `seq_length`, num_hidden) where num_hidden is number of
hidden units of `BertModel` layer.
positions: Position ids of tokens in the sequence to mask for pretraining,
with dimension (batch_size, num_predictions) where
`num_predictions` is the maximum number of tokens to mask out and predict
per sequence.
Returns:
Masked out sequence tensor of shape (batch_size * num_predictions,
num_hidden).
"""
sequence_shape = tf_utils.get_shape_list(
sequence_tensor, name='sequence_output_tensor')
batch_size, seq_length, width = sequence_shape
flat_offsets = tf.keras.backend.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.keras.backend.reshape(
sequence_tensor, [batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
@tf.keras.utils.register_keras_serializable(package='Text')
# Temporary until we can create a Dense layer that ties the embedding.
class Bias(tf.keras.layers.Layer):
"""Adds a bias term to an input."""
def __init__(self,
initializer='zeros',
regularizer=None,
constraint=None,
activation=None,
**kwargs):
super(Bias, self).__init__(**kwargs)
self._initializer = tf.keras.initializers.get(initializer)
self._regularizer = tf.keras.regularizers.get(regularizer)
self._constraint = tf.keras.constraints.get(constraint)
self._activation = tf.keras.activations.get(activation)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
self._bias = self.add_weight(
'bias',
shape=input_shape[1:],
initializer=self._initializer,
regularizer=self._regularizer,
constraint=self._constraint,
dtype=self._dtype,
trainable=True)
super(Bias, self).build(input_shape)
def get_config(self):
config = {
'activation': tf.keras.activations.serialize(self._activation),
'initializer': tf.keras.initializers.serialize(self._initializer),
'regularizer': tf.keras.regularizers.serialize(self._regularizer),
'constraint': tf.keras.constraints.serialize(self._constraint)
}
base_config = super(Bias, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
outputs = tf.nn.bias_add(inputs, self._bias)
if self._activation is not None:
return self._activation(outputs) # pylint: disable=not-callable
else:
return outputs
|
PyTorch/Translation/GNMT | GNMT | README | # GNMT v2 For PyTorch
This repository provides a script and recipe to train the GNMT v2 model to
achieve state-of-the-art accuracy; it is tested and maintained by NVIDIA.
## Table Of Contents
<!-- TOC GFM -->
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training accuracy: NVIDIA DGX-2H (16x V100 32GB)](#training-accuracy-nvidia-dgx-2h-16x-v100-32gb)
* [Training stability test](#training-stability-test)
* [Training throughput results](#training-throughput-results)
* [Training throughput: NVIDIA DGX A100 (8x A100 40GB)](#training-throughput-nvidia-dgx-a100-8x-a100-40gb)
* [Training throughput: NVIDIA DGX-1 (8x V100 16GB)](#training-throughput-nvidia-dgx-1-8x-v100-16gb)
* [Training throughput: NVIDIA DGX-2H (16x V100 32GB)](#training-throughput-nvidia-dgx-2h-16x-v100-32gb)
* [Inference accuracy results](#inference-accuracy-results)
* [Inference accuracy: NVIDIA A100 40GB](#inference-accuracy-nvidia-a100-40gb)
* [Inference accuracy: NVIDIA Tesla V100 16GB](#inference-accuracy-nvidia-tesla-v100-16gb)
* [Inference accuracy: NVIDIA T4](#inference-accuracy-nvidia-t4)
* [Inference throughput results](#inference-throughput-results)
* [Inference throughput: NVIDIA A100 40GB](#inference-throughput-nvidia-a100-40gb)
* [Inference throughput: NVIDIA T4](#inference-throughput-nvidia-t4)
* [Inference latency results](#inference-latency-results)
* [Inference latency: NVIDIA A100 40GB](#inference-latency-nvidia-a100-40gb)
* [Inference latency: NVIDIA T4](#inference-latency-nvidia-t4)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
<!-- /TOC -->
## Model overview
The GNMT v2 model is similar to the one discussed in the paper [Google's Neural
Machine Translation System: Bridging the Gap between Human and Machine
Translation](https://arxiv.org/abs/1609.08144).
The most important difference between the two models is in the attention
mechanism. In our model, the output from the first LSTM layer of the decoder
goes into the attention module, then the re-weighted context is concatenated
with inputs to all subsequent LSTM layers in the decoder at the current
time step.
The same attention mechanism is also implemented in the default GNMT-like
models from [TensorFlow Neural Machine Translation
Tutorial](https://github.com/tensorflow/nmt) and [NVIDIA OpenSeq2Seq
Toolkit](https://github.com/NVIDIA/OpenSeq2Seq).
### Model architecture

### Default configuration
The following features were implemented in this model:
* general:
* encoder and decoder are using shared embeddings
* data-parallel multi-GPU training
* dynamic loss scaling with backoff for Tensor Cores (mixed precision)
training
* trained with label smoothing loss (smoothing factor 0.1)
* encoder:
* 4-layer LSTM, hidden size 1024, first layer is bidirectional, the rest are
unidirectional
* with residual connections starting from 3rd layer
* uses standard PyTorch nn.LSTM layer
* dropout is applied on input to all LSTM layers, probability of dropout is
set to 0.2
* hidden state of LSTM layers is initialized with zeros
* weights and biases of LSTM layers are initialized with the uniform(-0.1, 0.1)
distribution
* decoder:
* 4-layer unidirectional LSTM with hidden size 1024 and fully-connected
classifier
* with residual connections starting from 3rd layer
* uses standard PyTorch nn.LSTM layer
* dropout is applied on input to all LSTM layers, probability of dropout is
set to 0.2
* hidden state of LSTM layers is initialized with zeros
* weights and biases of LSTM layers are initialized with the uniform(-0.1, 0.1)
distribution
* weights and biases of the fully-connected classifier are initialized with the
uniform(-0.1, 0.1) distribution
* attention:
* normalized Bahdanau attention
* output from first LSTM layer of decoder goes into attention, then
re-weighted context is concatenated with the input to all subsequent LSTM
layers of the decoder at the current timestep
* linear transform of keys and queries is initialized with uniform(-0.1,
0.1), normalization scalar is initialized with 1.0/sqrt(1024),
normalization bias is initialized with zero
* inference:
* beam search with default beam size of 5
* with coverage penalty and length normalization; the coverage penalty factor is
set to 0.1, the length normalization factor to 0.6, and the length
normalization constant to 5.0 (see the sketch after this list)
* de-tokenized BLEU computed by
[SacreBLEU](https://github.com/mjpost/sacrebleu)
* [motivation](https://github.com/mjpost/sacrebleu#motivation) for choosing
SacreBLEU
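The length normalization used here follows the formulation from the GNMT paper;
below is a minimal sketch of the default scoring with the values above (the
in-repo implementation lives in `seq2seq/inference/beam_search.py`):
```
def length_penalty(length, const=5.0, factor=0.6):
    # Wu et al. (2016): lp(Y) = ((const + |Y|) / (const + 1)) ** factor
    return ((const + length) / (const + 1.0)) ** factor

# Beam candidates are ranked by their summed log-probability divided by
# this penalty, which prevents the search from favoring short outputs.
```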
When comparing BLEU scores, be aware that there are various tokenization
approaches and BLEU calculation methodologies; therefore, ensure you compare
scores computed with the same methodology.
Code from this repository can be used to train a larger, 8-layer GNMT v2 model.
Our experiments show that a 4-layer model is significantly faster to train and
yields comparable accuracy on the public [WMT16
English-German](http://www.statmt.org/wmt16/translation-task.html) dataset. The
number of LSTM layers is controlled by the `--num-layers` parameter in the
`train.py` training script.
### Feature support matrix
The following features are supported by this model.
| **Feature** | **GNMT v2** |
|:------------|------------:|
|[Apex AMP](https://nvidia.github.io/apex/amp.html) | Yes |
|[Apex DistributedDataParallel](https://nvidia.github.io/apex/parallel.html#apex.parallel.DistributedDataParallel) | Yes |
#### Features
[Apex AMP](https://nvidia.github.io/apex/amp.html) - a tool that enables Tensor
Core-accelerated training. Refer to the [Enabling mixed
precision](#enabling-mixed-precision) section for more details.
[Apex
DistributedDataParallel](https://nvidia.github.io/apex/parallel.html#apex.parallel.DistributedDataParallel) -
a module wrapper that enables easy multiprocess distributed data parallel
training, similar to
[torch.nn.parallel.DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel).
`DistributedDataParallel` is optimized for use with
[NCCL](https://github.com/NVIDIA/nccl). It achieves high performance by
overlapping communication with computation during `backward()` and bucketing
smaller gradient transfers to reduce the total number of transfers required.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a
computational method.
[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant
computational speedup by performing operations in half-precision format, while
storing minimal information in single-precision to retain as much information
as possible in critical parts of the network. Since the introduction of [Tensor
Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with
both the Turing and Ampere architectures, significant training speedups are
experienced by switching to mixed precision -- up to 3x overall speedup on the
most arithmetically intense model architectures. Using mixed precision training
previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Manually adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced
in the Pascal architecture and first supported in [CUDA
8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep
Learning SDK.
For information about:
* How to train using mixed precision, see the [Mixed Precision
Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed
Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
documentation.
* Techniques used for mixed precision training, see the [Mixed-Precision
Training of Deep Neural
Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/)
blog.
* APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy
Mixed-Precision Training in
PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/)
blog post.
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision
(AMP), library from [APEX](https://github.com/NVIDIA/apex) that casts variables
to half-precision upon retrieval, while storing variables in single-precision
format. Furthermore, to preserve small gradient magnitudes in backpropagation,
a [loss
scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling)
step must be included when applying gradients. In PyTorch, loss scaling can be
easily applied by using the `scale_loss()` method provided by AMP. The scaling
value to be used can be
[dynamic](https://nvidia.github.io/apex/amp.html#apex.amp.initialize) or fixed.
For an in-depth walk through on AMP, check out sample usage
[here](https://nvidia.github.io/apex/amp.html#).
[APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains
utility libraries, such as AMP, which require minimal network code changes to
leverage Tensor Cores performance.
The following steps were needed to enable mixed precision training in GNMT:
* Import AMP from APEX (file: `seq2seq/train/trainer.py`):
```
from apex import amp
```
* Initialize AMP and wrap the model and the optimizer (file:
`seq2seq/train/trainer.py`, class: `Seq2SeqTrainer`):
```
self.model, self.optimizer = amp.initialize(
self.model,
self.optimizer,
cast_model_outputs=torch.float16,
keep_batchnorm_fp32=False,
opt_level='O2')
```
* Apply `scale_loss` context manager (file: `seq2seq/train/fp_optimizers.py`,
class: `AMPOptimizer`):
```
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
```
* Apply gradient clipping on single precision master weights (file:
`seq2seq/train/fp_optimizers.py`, class: `AMPOptimizer`):
```
if self.grad_clip != float('inf'):
clip_grad_norm_(amp.master_params(optimizer), self.grad_clip)
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA
A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the
matrix math also called tensor operations. TF32 running on Tensor Cores in A100
GPUs can provide up to 10x speedups compared to single-precision floating-point
math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of
accuracy. It is more robust than FP16 for models which require high dynamic
range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates
AI Training, HPC up to
20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/)
blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by
default.
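TF32 behavior can also be controlled explicitly from Python; a minimal sketch
(these global switches exist in recent PyTorch releases and are not part of
this repository's scripts):
```
import torch

# Enabled by default on NVIDIA Ampere GPUs; set to False to force FP32 math.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
```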
## Setup
The following section lists the requirements in order to start training the
GNMT v2 model.
### Requirements
This repository contains `Dockerfile` which extends the PyTorch NGC container
and encapsulates some dependencies. Aside from these dependencies, ensure you
have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch 20.06-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
* GPU architecture:
* [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
* [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep
Learning DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html),
* [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry),
* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running).
For those unable to use the PyTorch NGC container, to set up the required
environment or create your own container, see the versioned [NVIDIA Container
Support
Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using
FP32, perform the following steps using the default parameters of the GNMT v2
model on the WMT16 English German dataset. For the specifics concerning
training and inference, see the [Advanced](#advanced) section.
**1. Clone the repository.**
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/Translation/GNMT
```
**2. Build the GNMT v2 Docker container.**
```
bash scripts/docker/build.sh
```
**3. Start an interactive session in the container to run training/inference.**
```
bash scripts/docker/interactive.sh
```
**4. Download and preprocess the dataset.**
Data will be downloaded to the `data` directory (on the host). The `data`
directory is mounted to the `/workspace/gnmt/data` location in the Docker
container.
```
bash scripts/wmt16_en_de.sh
```
**5. Start training.**
The training script saves only one checkpoint with the lowest value of the loss
function on the validation dataset. All results and logs are saved to the
`gnmt` directory (on the host) or to the `/workspace/gnmt/gnmt` directory
(in the container). By default, the `train.py` script will launch mixed
precision training with Tensor Cores. You can change this behavior by setting:
* the `--math fp32` flag to launch single precision training (for NVIDIA Volta
and NVIDIA Turing architectures) or
* the `--math tf32` flag to launch TF32 training with Tensor Cores (for NVIDIA
Ampere architecture)
for the `train.py` training script.
To launch mixed precision training on 1, 4 or 8 GPUs, run:
```
python3 -m torch.distributed.launch --nproc_per_node=<#GPUs> train.py --seed 2 --train-global-batch-size 1024
```
To launch mixed precision training on 16 GPUs, run:
```
python3 -m torch.distributed.launch --nproc_per_node=16 train.py --seed 2 --train-global-batch-size 2048
```
By default, the training script will launch training with batch size 128 per
GPU. If `--train-global-batch-size` is specified and larger than 128 times the
number of GPUs available for the training then the training script will
accumulate gradients over consecutive iterations and then perform the weight
update. For example, 1 GPU training with `--train-global-batch-size 1024` will
accumulate gradients over 8 iterations before doing the weight update with
accumulated gradients.
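The number of accumulation steps follows directly from the global batch size;
the arithmetic (matching the `train_iter_size` equation shown in the `--help`
output below) for the example above is:
```
# train_iter_size = train_global_batch_size // (train_batch_size * world_size)
# e.g. 1024 // (128 * 1) = 8 gradient-accumulation steps on a single GPU
```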
**6. Start evaluation.**
The training process automatically runs evaluation and outputs the BLEU score
after each training epoch. Additionally, after the training is done, you can
manually run inference on the test dataset with the checkpoint saved during the
training.
To launch FP16 inference on the `newstest2014.en` test set, run:
```
python3 translate.py \
--input data/wmt16_de_en/newstest2014.en \
--reference data/wmt16_de_en/newstest2014.de \
--output /tmp/output \
--model gnmt/model_best.pth
```
The script will load the checkpoint specified by the `--model` option, then it
will launch inference on the file specified by the `--input` option, and
compute BLEU score against the reference translation specified by the
`--reference` option. Outputs will be stored to the location specified by the
`--output` option.
Additionally, one can pass the input text directly from the command-line:
```
python3 translate.py \
--input-text "The quick brown fox jumps over the lazy dog" \
--model gnmt/model_best.pth
```
Translated output will be printed to the console:
```
(...)
0: Translated output:
Der schnelle braune Fuchs springt über den faulen Hund
```
By default, the `translate.py` script will launch FP16 inference with Tensor
Cores. You can change this behavior by setting:
* the `--math fp32` flag to launch single precision inference (for NVIDIA Volta
and NVIDIA Turing architectures) or
* the `--math tf32` flag to launch TF32 inference with Tensor Cores (for NVIDIA
Ampere architecture)
for the `translate.py` inference script.
## Advanced
The following sections provide greater details of the dataset, running training
and inference, and the training results.
### Scripts and sample code
In the `root` directory, the most important files are:
* `train.py`: serves as the entry point to launch the training
* `translate.py`: serves as the entry point to launch inference
* `Dockerfile`: container with the basic set of dependencies to run GNMT v2
* `requirements.txt`: set of extra requirements for running GNMT v2
The `seq2seq/model` directory contains the implementation of GNMT v2 building
blocks:
* `attention.py`: implementation of normalized Bahdanau attention
* `encoder.py`: implementation of recurrent encoder
* `decoder.py`: implementation of recurrent decoder with attention
* `seq2seq_base.py`: base class for seq2seq models
* `gnmt.py`: implementation of GNMT v2 model
The `seq2seq/train` directory encapsulates the necessary tools to execute
training:
* `trainer.py`: implementation of training loop
* `smoothing.py`: implementation of cross-entropy with label smoothing
* `lr_scheduler.py`: implementation of exponential learning rate warmup and
step decay
* `fp_optimizers.py`: implementation of optimizers for various floating point
precisions
The `seq2seq/inference` directory contains scripts required to run inference:
* `beam_search.py`: implementation of beam search with length normalization and
length penalty
* `translator.py`: implementation of auto-regressive inference
The `seq2seq/data` directory contains implementation of components needed for
data loading:
* `dataset.py`: implementation of text datasets
* `sampler.py`: implementation of batch samplers with bucketing by sequence
length
* `tokenizer.py`: implementation of tokenizer (maps integer vocabulary indices
to text)
### Parameters
Training
The complete list of available parameters for the `train.py` training script
contains:
```
dataset setup:
--dataset-dir DATASET_DIR
path to the directory with training/test data
(default: data/wmt16_de_en)
--src-lang SRC_LANG source language (default: en)
--tgt-lang TGT_LANG target language (default: de)
--vocab VOCAB path to the vocabulary file (relative to DATASET_DIR
directory) (default: vocab.bpe.32000)
-bpe BPE_CODES, --bpe-codes BPE_CODES
path to the file with bpe codes (relative to
DATASET_DIR directory) (default: bpe.32000)
--train-src TRAIN_SRC
path to the training source data file (relative to
DATASET_DIR directory) (default:
train.tok.clean.bpe.32000.en)
--train-tgt TRAIN_TGT
path to the training target data file (relative to
DATASET_DIR directory) (default:
train.tok.clean.bpe.32000.de)
--val-src VAL_SRC path to the validation source data file (relative to
DATASET_DIR directory) (default:
newstest_dev.tok.clean.bpe.32000.en)
--val-tgt VAL_TGT path to the validation target data file (relative to
DATASET_DIR directory) (default:
newstest_dev.tok.clean.bpe.32000.de)
--test-src TEST_SRC path to the test source data file (relative to
DATASET_DIR directory) (default:
newstest2014.tok.bpe.32000.en)
--test-tgt TEST_TGT path to the test target data file (relative to
DATASET_DIR directory) (default: newstest2014.de)
--train-max-size TRAIN_MAX_SIZE
use at most TRAIN_MAX_SIZE elements from training
dataset (useful for benchmarking), by default uses
entire dataset (default: None)
results setup:
--save-dir SAVE_DIR path to directory with results, it will be
automatically created if it does not exist (default:
gnmt)
--print-freq PRINT_FREQ
print log every PRINT_FREQ batches (default: 10)
model setup:
--hidden-size HIDDEN_SIZE
hidden size of the model (default: 1024)
--num-layers NUM_LAYERS
number of RNN layers in encoder and in decoder
(default: 4)
--dropout DROPOUT dropout applied to input of RNN cells (default: 0.2)
--share-embedding use shared embeddings for encoder and decoder (use '--
no-share-embedding' to disable) (default: True)
--smoothing SMOOTHING
label smoothing, if equal to zero model will use
CrossEntropyLoss, if not zero model will be trained
with label smoothing loss (default: 0.1)
general setup:
--math {fp16,fp32,tf32,manual_fp16}
precision (default: fp16)
--seed SEED master seed for random number generators, if "seed" is
undefined then the master seed will be sampled from
random.SystemRandom() (default: None)
--prealloc-mode {off,once,always}
controls preallocation (default: always)
--dllog-file DLLOG_FILE
Name of the DLLogger output file (default:
train_log.json)
--eval run validation and test after every epoch (use '--no-
eval' to disable) (default: True)
--env print info about execution env (use '--no-env' to
disable) (default: True)
--cuda enables cuda (use '--no-cuda' to disable) (default:
True)
--cudnn enables cudnn (use '--no-cudnn' to disable) (default:
True)
--log-all-ranks enables logging from all distributed ranks, if
disabled then only logs from rank 0 are reported (use
'--no-log-all-ranks' to disable) (default: True)
training setup:
--train-batch-size TRAIN_BATCH_SIZE
training batch size per worker (default: 128)
--train-global-batch-size TRAIN_GLOBAL_BATCH_SIZE
global training batch size, this argument does not
have to be defined, if it is defined it will be used
to automatically compute train_iter_size using the
equation: train_iter_size = train_global_batch_size //
(train_batch_size * world_size) (default: None)
--train-iter-size N training iter size, training loop will accumulate
gradients over N iterations and execute optimizer
every N steps (default: 1)
--epochs EPOCHS max number of training epochs (default: 6)
--grad-clip GRAD_CLIP
enables gradient clipping and sets maximum norm of
gradients (default: 5.0)
--train-max-length TRAIN_MAX_LENGTH
maximum sequence length for training (including
special BOS and EOS tokens) (default: 50)
--train-min-length TRAIN_MIN_LENGTH
minimum sequence length for training (including
special BOS and EOS tokens) (default: 0)
--train-loader-workers TRAIN_LOADER_WORKERS
number of workers for training data loading (default:
2)
--batching {random,sharding,bucketing}
select batching algorithm (default: bucketing)
--shard-size SHARD_SIZE
shard size for "sharding" batching algorithm, in
multiples of global batch size (default: 80)
--num-buckets NUM_BUCKETS
number of buckets for "bucketing" batching algorithm
(default: 5)
optimizer setup:
--optimizer OPTIMIZER
training optimizer (default: Adam)
--lr LR learning rate (default: 0.002)
--optimizer-extra OPTIMIZER_EXTRA
extra options for the optimizer (default: {})
mixed precision loss scaling setup:
--init-scale INIT_SCALE
initial loss scale (default: 8192)
--upscale-interval UPSCALE_INTERVAL
loss upscaling interval (default: 128)
learning rate scheduler setup:
--warmup-steps WARMUP_STEPS
number of learning rate warmup iterations (default:
200)
--remain-steps REMAIN_STEPS
starting iteration for learning rate decay (default:
0.666)
--decay-interval DECAY_INTERVAL
interval between learning rate decay steps (default:
None)
--decay-steps DECAY_STEPS
max number of learning rate decay steps (default: 4)
--decay-factor DECAY_FACTOR
learning rate decay factor (default: 0.5)
validation setup:
--val-batch-size VAL_BATCH_SIZE
batch size for validation (default: 64)
--val-max-length VAL_MAX_LENGTH
maximum sequence length for validation (including
special BOS and EOS tokens) (default: 125)
--val-min-length VAL_MIN_LENGTH
minimum sequence length for validation (including
special BOS and EOS tokens) (default: 0)
--val-loader-workers VAL_LOADER_WORKERS
number of workers for validation data loading
(default: 0)
test setup:
--test-batch-size TEST_BATCH_SIZE
batch size for test (default: 128)
--test-max-length TEST_MAX_LENGTH
maximum sequence length for test (including special
BOS and EOS tokens) (default: 150)
--test-min-length TEST_MIN_LENGTH
minimum sequence length for test (including special
BOS and EOS tokens) (default: 0)
--beam-size BEAM_SIZE
beam size (default: 5)
--len-norm-factor LEN_NORM_FACTOR
length normalization factor (default: 0.6)
--cov-penalty-factor COV_PENALTY_FACTOR
coverage penalty factor (default: 0.1)
--len-norm-const LEN_NORM_CONST
length normalization constant (default: 5.0)
--intra-epoch-eval N evaluate within training epoch, this option will
enable extra N equally spaced evaluations executed
during each training epoch (default: 0)
--test-loader-workers TEST_LOADER_WORKERS
number of workers for test data loading (default: 0)
checkpointing setup:
--start-epoch START_EPOCH
manually set initial epoch counter (default: 0)
--resume PATH resumes training from checkpoint from PATH (default:
None)
--save-all saves checkpoint after every epoch (default: False)
--save-freq SAVE_FREQ
save checkpoint every SAVE_FREQ batches (default:
5000)
--keep-checkpoints KEEP_CHECKPOINTS
keep only last KEEP_CHECKPOINTS checkpoints, affects
only checkpoints controlled by --save-freq option
(default: 0)
benchmark setup:
--target-perf TARGET_PERF
target training performance (in tokens per second)
(default: None)
--target-bleu TARGET_BLEU
target accuracy (default: None)
```
Inference
The complete list of available parameters for the `translate.py` inference
script contains:
```
data setup:
-o OUTPUT, --output OUTPUT
full path to the output file if not specified, then
the output will be printed (default: None)
-r REFERENCE, --reference REFERENCE
full path to the file with reference translations (for
sacrebleu, raw text) (default: None)
-m MODEL, --model MODEL
full path to the model checkpoint file (default: None)
--synthetic use synthetic dataset (default: False)
--synthetic-batches SYNTHETIC_BATCHES
number of synthetic batches to generate (default: 64)
--synthetic-vocab SYNTHETIC_VOCAB
size of synthetic vocabulary (default: 32320)
--synthetic-len SYNTHETIC_LEN
sequence length of synthetic samples (default: 50)
-i INPUT, --input INPUT
full path to the input file (raw text) (default: None)
-t INPUT_TEXT [INPUT_TEXT ...], --input-text INPUT_TEXT [INPUT_TEXT ...]
raw input text (default: None)
--sort sorts dataset by sequence length (use '--no-sort' to
disable) (default: False)
inference setup:
--batch-size BATCH_SIZE [BATCH_SIZE ...]
batch size per GPU (default: [128])
--beam-size BEAM_SIZE [BEAM_SIZE ...]
beam size (default: [5])
--max-seq-len MAX_SEQ_LEN
maximum generated sequence length (default: 80)
--len-norm-factor LEN_NORM_FACTOR
length normalization factor (default: 0.6)
--cov-penalty-factor COV_PENALTY_FACTOR
coverage penalty factor (default: 0.1)
--len-norm-const LEN_NORM_CONST
length normalization constant (default: 5.0)
general setup:
--math {fp16,fp32,tf32} [{fp16,fp32,tf32} ...]
precision (default: ['fp16'])
--env print info about execution env (use '--no-env' to
disable) (default: False)
--bleu compares with reference translation and computes BLEU
(use '--no-bleu' to disable) (default: True)
--cuda enables cuda (use '--no-cuda' to disable) (default:
True)
--cudnn enables cudnn (use '--no-cudnn' to disable) (default:
True)
--batch-first uses (batch, seq, feature) data format for RNNs
(default: True)
--seq-first uses (seq, batch, feature) data format for RNNs
(default: True)
--save-dir SAVE_DIR path to directory with results, it will be
automatically created if it does not exist (default:
gnmt)
--dllog-file DLLOG_FILE
Name of the DLLogger output file (default:
eval_log.json)
--print-freq PRINT_FREQ, -p PRINT_FREQ
print log every PRINT_FREQ batches (default: 1)
benchmark setup:
--target-perf TARGET_PERF
target inference performance (in tokens per second)
(default: None)
--target-bleu TARGET_BLEU
target accuracy (default: None)
--repeat REPEAT [REPEAT ...]
loops over the dataset REPEAT times, flag accepts
multiple arguments, one for each specified batch size
(default: [1])
--warmup WARMUP warmup iterations for performance counters (default:
0)
--percentiles PERCENTILES [PERCENTILES ...]
Percentiles for confidence intervals for
throughput/latency benchmarks (default: (90, 95, 99))
--tables print accuracy, throughput and latency results in
tables (use '--no-tables' to disable) (default: False)
```
### Command-line options
To see the full list of available options and their descriptions, use the `-h`
or `--help` command line option. For example, for training:
```
python3 train.py --help
usage: train.py [-h] [--dataset-dir DATASET_DIR] [--src-lang SRC_LANG]
[--tgt-lang TGT_LANG] [--vocab VOCAB] [-bpe BPE_CODES]
[--train-src TRAIN_SRC] [--train-tgt TRAIN_TGT]
[--val-src VAL_SRC] [--val-tgt VAL_TGT] [--test-src TEST_SRC]
[--test-tgt TEST_TGT] [--save-dir SAVE_DIR]
[--print-freq PRINT_FREQ] [--hidden-size HIDDEN_SIZE]
[--num-layers NUM_LAYERS] [--dropout DROPOUT]
[--share-embedding] [--smoothing SMOOTHING]
[--math {fp16,fp32,tf32,manual_fp16}] [--seed SEED]
[--prealloc-mode {off,once,always}] [--dllog-file DLLOG_FILE]
[--eval] [--env] [--cuda] [--cudnn] [--log-all-ranks]
[--train-max-size TRAIN_MAX_SIZE]
[--train-batch-size TRAIN_BATCH_SIZE]
[--train-global-batch-size TRAIN_GLOBAL_BATCH_SIZE]
[--train-iter-size N] [--epochs EPOCHS]
[--grad-clip GRAD_CLIP] [--train-max-length TRAIN_MAX_LENGTH]
[--train-min-length TRAIN_MIN_LENGTH]
[--train-loader-workers TRAIN_LOADER_WORKERS]
[--batching {random,sharding,bucketing}]
[--shard-size SHARD_SIZE] [--num-buckets NUM_BUCKETS]
[--optimizer OPTIMIZER] [--lr LR]
[--optimizer-extra OPTIMIZER_EXTRA] [--init-scale INIT_SCALE]
[--upscale-interval UPSCALE_INTERVAL]
[--warmup-steps WARMUP_STEPS] [--remain-steps REMAIN_STEPS]
[--decay-interval DECAY_INTERVAL] [--decay-steps DECAY_STEPS]
[--decay-factor DECAY_FACTOR]
[--val-batch-size VAL_BATCH_SIZE]
[--val-max-length VAL_MAX_LENGTH]
[--val-min-length VAL_MIN_LENGTH]
[--val-loader-workers VAL_LOADER_WORKERS]
[--test-batch-size TEST_BATCH_SIZE]
[--test-max-length TEST_MAX_LENGTH]
[--test-min-length TEST_MIN_LENGTH] [--beam-size BEAM_SIZE]
[--len-norm-factor LEN_NORM_FACTOR]
[--cov-penalty-factor COV_PENALTY_FACTOR]
[--len-norm-const LEN_NORM_CONST] [--intra-epoch-eval N]
[--test-loader-workers TEST_LOADER_WORKERS]
[--start-epoch START_EPOCH] [--resume PATH] [--save-all]
[--save-freq SAVE_FREQ] [--keep-checkpoints KEEP_CHECKPOINTS]
[--target-perf TARGET_PERF] [--target-bleu TARGET_BLEU]
[--local_rank LOCAL_RANK]
```
For example, for inference:
```
python3 translate.py --help
usage: translate.py [-h] [-o OUTPUT] [-r REFERENCE] [-m MODEL] [--synthetic]
[--synthetic-batches SYNTHETIC_BATCHES]
[--synthetic-vocab SYNTHETIC_VOCAB]
[--synthetic-len SYNTHETIC_LEN]
[-i INPUT | -t INPUT_TEXT [INPUT_TEXT ...]] [--sort]
[--batch-size BATCH_SIZE [BATCH_SIZE ...]]
[--beam-size BEAM_SIZE [BEAM_SIZE ...]]
[--max-seq-len MAX_SEQ_LEN]
[--len-norm-factor LEN_NORM_FACTOR]
[--cov-penalty-factor COV_PENALTY_FACTOR]
[--len-norm-const LEN_NORM_CONST]
[--math {fp16,fp32,tf32} [{fp16,fp32,tf32} ...]] [--env]
[--bleu] [--cuda] [--cudnn] [--batch-first | --seq-first]
[--save-dir SAVE_DIR] [--dllog-file DLLOG_FILE]
[--print-freq PRINT_FREQ] [--target-perf TARGET_PERF]
[--target-bleu TARGET_BLEU] [--repeat REPEAT [REPEAT ...]]
[--warmup WARMUP]
[--percentiles PERCENTILES [PERCENTILES ...]] [--tables]
[--local_rank LOCAL_RANK]
```
### Getting the data
The GNMT v2 model was trained on the [WMT16
English-German](http://www.statmt.org/wmt16/translation-task.html) dataset.
The concatenation of the newstest2015 and newstest2016 test sets is used as a
validation dataset, and newstest2014 is used as a testing dataset.
This repository contains the `scripts/wmt16_en_de.sh` download script which
automatically downloads and preprocesses the training, validation and test
datasets. By default, data is downloaded to the `data` directory.
Our download script is very similar to the `wmt16_en_de.sh` script from the
[tensorflow/nmt](https://github.com/tensorflow/nmt/blob/master/nmt/scripts/wmt16_en_de.sh)
repository. Our download script contains an extra preprocessing step, which
discards all sentence pairs that can't be decoded by the *latin-1* encoder.
The `scripts/wmt16_en_de.sh` script uses the
[subword-nmt](https://github.com/rsennrich/subword-nmt) package to segment text
into subword units (Byte Pair Encodings -
[BPE](https://en.wikipedia.org/wiki/Byte_pair_encoding)). By default, the
script builds the shared vocabulary of 32,000 tokens.
In order to test with other datasets, the script needs to be customized
accordingly.
#### Dataset guidelines
The process of downloading and preprocessing the data can be found in the
`scripts/wmt16_en_de.sh` script.
Initially, data is downloaded from [www.statmt.org](http://www.statmt.org). Then
`europarl-v7`, `commoncrawl` and `news-commentary` corpora are concatenated to
form the training dataset, similarly `newstest2015` and `newstest2016` are
concatenated to form the validation dataset. Raw data is preprocessed with
[Moses](https://github.com/moses-smt/mosesdecoder), first by launching [Moses
tokenizer](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl)
(the tokenizer breaks up text into individual words), then by launching
[clean-corpus-n.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/training/clean-corpus-n.perl),
which removes invalid sentences and does initial filtering by sequence length.
The second stage of preprocessing is done by launching the
`scripts/filter_dataset.py` script, which discards all pairs of sentences that
can't be decoded by the latin-1 encoder.
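The core of this filter can be sketched as follows (a minimal illustration,
not the exact contents of `scripts/filter_dataset.py`):
```
def is_latin1(sentence):
    # True if the sentence can be represented in the latin-1 charset.
    try:
        sentence.encode('latin-1')
        return True
    except UnicodeEncodeError:
        return False
```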
The third stage of preprocessing uses the
[subword-nmt](https://github.com/rsennrich/subword-nmt) package. First it
builds shared [byte pair
encoding](https://en.wikipedia.org/wiki/Byte_pair_encoding) vocabulary with
32,000 merge operations (command `subword-nmt learn-bpe`), then it applies
generated vocabulary to training, validation and test corpora (command
`subword-nmt apply-bpe`).
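The same two steps can be reproduced from Python with the `subword_nmt` API
(a hedged sketch; the file names are placeholders):
```
from subword_nmt.learn_bpe import learn_bpe
from subword_nmt.apply_bpe import BPE

# Learn 32,000 BPE merge operations from the tokenized training corpus.
with open('train.tok', encoding='utf-8') as train_file, \
        open('bpe.32000', 'w', encoding='utf-8') as codes_file:
    learn_bpe(train_file, codes_file, num_symbols=32000)

# Apply the learned merges to text, one line at a time.
with open('bpe.32000', encoding='utf-8') as codes_file:
    bpe = BPE(codes_file)
print(bpe.process_line('resumption of the session'))
```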
### Training process
The default training configuration can be launched by running the `train.py`
training script. By default, the training script saves only one checkpoint with
the lowest value of the loss function on the validation dataset. An evaluation
is then performed after each training epoch. Results are stored in the
`gnmt` directory.
The training script launches data-parallel training with batch size 128 per GPU
on all available GPUs. We have tested training on up to 16 GPUs on a single
node.
After each training epoch, the script runs an evaluation on the validation
dataset and outputs a BLEU score on the test dataset (newstest2014). BLEU is
computed by the [SacreBLEU](https://github.com/mjpost/sacreBLEU) package. Logs
from the training and evaluation are saved to the `gnmt` directory.
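The reported score can be reproduced offline with the SacreBLEU Python API (an
illustrative snippet; the file paths below match the inference examples in
this document):
```
import sacrebleu

hypotheses = open('/tmp/output', encoding='utf-8').read().splitlines()
references = open('data/wmt16_de_en/newstest2014.de', encoding='utf-8').read().splitlines()
bleu = sacrebleu.corpus_bleu(hypotheses, [references])
print(bleu.score)
```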
The summary after each training epoch is printed in the following format:
```
0: Summary: Epoch: 3 Training Loss: 3.1336 Validation Loss: 2.9587 Test BLEU: 23.18
0: Performance: Epoch: 3 Training: 418772 Tok/s Validation: 1445331 Tok/s
```
The training loss is averaged over an entire training epoch, the validation
loss is averaged over the validation dataset and the BLEU score is computed on
the test dataset. Performance is reported in total tokens per second. The
result is averaged over an entire training epoch and summed over all GPUs
participating in the training.
By default, the `train.py` script will launch mixed precision training with
Tensor Cores. You can change this behavior by setting:
* the `--math fp32` flag to launch single precision training (for NVIDIA Volta
and NVIDIA Turing architectures) or
* the `--math tf32` flag to launch TF32 training with Tensor Cores (for NVIDIA
Ampere architecture)
for the `train.py` training script.
To view all available options for training, run `python3 train.py --help`.
### Inference process
Inference can be run by launching the `translate.py` inference script; note
that it requires a pre-trained model checkpoint and tokenized input.
The inference script, `translate.py`, supports batched inference. By default,
it launches beam search with beam size of 5, coverage penalty term and length
normalization term. Greedy decoding can be enabled by setting the beam size to
1.
To view all available options for inference, run `python3 translate.py --help`.
## Performance
The performance measurements in this document were conducted at the time of
publication and may not reflect the performance achieved from NVIDIA’s latest
software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product
Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model
performance in training and inference modes.
#### Training performance benchmark
Training is launched on batches of text data; different batches have different
sequence lengths (number of tokens in the longest sequence). Sequence length
and batch efficiency (the ratio of non-pad tokens to the total number of
tokens) affect training performance; therefore, it's recommended to run the
training on a large chunk of the training dataset to get a stable and
reliable average training performance. Ideally, at least one full epoch of
training should be launched to
get a good estimate of training performance.
The following commands will launch one epoch of training:
To launch mixed precision training on 1, 4 or 8 GPUs, run:
```
python3 -m torch.distributed.launch --nproc_per_node=<#GPUs> train.py --seed 2 --train-global-batch-size 1024 --epochs 1 --math fp16
```
To launch mixed precision training on 16 GPUs, run:
```
python3 -m torch.distributed.launch --nproc_per_node=16 train.py --seed 2 --train-global-batch-size 2048 --epochs 1 --math fp16
```
Change `--math fp16` to `--math fp32` to launch single precision training (for
NVIDIA Volta and NVIDIA Turing architectures) or to `--math tf32` to launch
TF32 training with Tensor Cores (for NVIDIA Ampere architecture).
After the training is completed, the `train.py` script prints a summary to
standard output. Performance results are printed in the following format:
```
(...)
0: Performance: Epoch: 0 Training: 418926 Tok/s Validation: 1430828 Tok/s
(...)
```
`Training: 418926 Tok/s` represents training throughput averaged over an entire
training epoch and summed over all GPUs participating in the training.
#### Inference performance benchmark
The inference performance and accuracy benchmarks require a checkpoint from a
fully trained model.
Command to launch the inference accuracy benchmark on NVIDIA Volta or on NVIDIA
Turing architectures:
```
python3 translate.py \
--model gnmt/model_best.pth \
--input data/wmt16_de_en/newstest2014.en \
--reference data/wmt16_de_en/newstest2014.de \
--output /tmp/output \
--math fp16 fp32 \
--batch-size 128 \
--beam-size 1 2 5 \
--tables
```
Command to launch the inference accuracy benchmark on NVIDIA Ampere architecture:
```
python3 translate.py \
--model gnmt/model_best.pth \
--input data/wmt16_de_en/newstest2014.en \
--reference data/wmt16_de_en/newstest2014.de \
--output /tmp/output \
--math fp16 tf32 \
--batch-size 128 \
--beam-size 1 2 5 \
--tables
```
Command to launch the inference throughput and latency benchmarks on NVIDIA
Volta or NVIDIA Turing architectures:
```
python3 translate.py \
--model gnmt/model_best.pth \
--input data/wmt16_de_en/newstest2014.en \
--reference data/wmt16_de_en/newstest2014.de \
--output /tmp/output \
--math fp16 fp32 \
--batch-size 1 2 4 8 32 128 512 \
--repeat 1 1 1 1 2 8 16 \
--beam-size 1 2 5 \
--warmup 5 \
--tables
```
Command to launch the inference throughput and latency benchmarks on NVIDIA
Ampere architecture:
```
python3 translate.py \
--model gnmt/model_best.pth \
--input data/wmt16_de_en/newstest2014.en \
--reference data/wmt16_de_en/newstest2014.de \
--output /tmp/output \
--math fp16 tf32 \
--batch-size 1 2 4 8 32 128 512 \
--repeat 1 1 1 1 2 8 16 \
--beam-size 1 2 5 \
--warmup 5 \
--tables
```
### Results
The following sections provide details on how we achieved our performance and
accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `train.py` script with the default
batch size = 128 per GPU in the pytorch-20.06-py3 NGC container on NVIDIA DGX
A100 with 8x A100 40GB GPUs.
Command to launch the training:
```
python3 -m torch.distributed.launch --nproc_per_node=<#GPUs> train.py --seed 2 --train-global-batch-size 1024 --math fp16
```
Change `--math fp16` to `--math tf32` to launch TF32 training with Tensor Cores.
| **GPUs** | **Batch Size / GPU** | **Accuracy - TF32 (BLEU)** | **Accuracy - Mixed precision (BLEU)** | **Time to Train - TF32 (minutes)** | **Time to Train - Mixed precision (minutes)** | **Time to Train Speedup (TF32 to Mixed precision)** |
| --- | --- | ----- | ----- | ----- | ------ | ---- |
| 8 | 128 | 24.46 | 24.60 | 34.7 | 22.7 | 1.53 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `train.py` script with the default
batch size = 128 per GPU in the pytorch-20.06-py3 NGC container on NVIDIA DGX-1
with 8x V100 16GB GPUs.
Command to launch the training:
```
python3 -m torch.distributed.launch --nproc_per_node=<#GPUs> train.py --seed 2 --train-global-batch-size 1024 --math fp16
```
Change `--math fp16` to `--math fp32` to launch single precision training.
| **GPUs** | **Batch Size / GPU** | **Accuracy - FP32 (BLEU)** | **Accuracy - Mixed precision (BLEU)** | **Time to Train - FP32 (minutes)** | **Time to Train - Mixed precision (minutes)** | **Time to Train Speedup (FP32 to Mixed precision)** |
| --- | --- | ----- | ----- | ----- | ------ | ---- |
| 1 | 128 | 24.41 | 24.42 | 810.0 | 224.0 | 3.62 |
| 4 | 128 | 24.40 | 24.33 | 218.2 | 69.5 | 3.14 |
| 8 | 128 | 24.45 | 24.38 | 112.0 | 38.6 | 2.90 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
##### Training accuracy: NVIDIA DGX-2H (16x V100 32GB)
Our results were obtained by running the `train.py` script with the default
batch size = 128 per GPU in the pytorch-20.06-py3 NGC container on NVIDIA DGX-2H
with 16x V100 32GB GPUs.
To launch mixed precision training on 16 GPUs, run:
```
python3 -m torch.distributed.launch --nproc_per_node=16 train.py --seed 2 --train-global-batch-size 2048 --math fp16
```
Change `--math fp16` to `--math fp32` to launch single precision training.
| **GPUs** | **Batch Size / GPU** | **Accuracy - FP32 (BLEU)** | **Accuracy - Mixed precision (BLEU)** | **Time to Train - FP32 (minutes)** | **Time to Train - Mixed precision (minutes)** | **Time to Train Speedup (FP32 to Mixed precision)** |
| --- | --- | ----- | ----- | ------ | ----- | ---- |
| 16 | 128 | 24.41 | 24.38 | 52.1 | 19.4 | 2.69 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.

##### Training stability test
The GNMT v2 model was trained for 6 epochs, starting from 32 different initial
random seeds. After each training epoch, the model was evaluated on the test
dataset and the BLEU score was recorded. The training was performed in the
pytorch-20.06-py3 Docker container on NVIDIA DGX A100 with 8x A100 40GB GPUs.
The following table summarizes the results of the stability test.
In the following table, the BLEU scores after each training epoch for different
initial random seeds are displayed.
| **Epoch** | **Average** | **Standard deviation** | **Minimum** | **Maximum** | **Median** |
| --- | ------ | ----- | ------ | ------ | ------ |
| 1 | 19.959 | 0.238 | 19.410 | 20.390 | 19.970 |
| 2 | 21.772 | 0.293 | 20.960 | 22.280 | 21.820 |
| 3 | 22.435 | 0.264 | 21.740 | 22.870 | 22.465 |
| 4 | 23.167 | 0.166 | 22.870 | 23.620 | 23.195 |
| 5 | 24.233 | 0.149 | 23.820 | 24.530 | 24.235 |
| 6 | 24.416 | 0.131 | 24.140 | 24.660 | 24.390 |
#### Training throughput results
##### Training throughput: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `train.py` training script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs.
Throughput performance numbers (in tokens per second) were averaged over an
entire training epoch.
| **GPUs** | **Batch size / GPU** | **Throughput - TF32 (tok/s)** | **Throughput - Mixed precision (tok/s)** | **Throughput speedup (TF32 to Mixed precision)** | **Strong Scaling - TF32** | **Strong Scaling - Mixed precision** |
| --- | --- | ------ | ------ | ----- | ----- | ----- |
| 1 | 128 | 83214 | 140909 | 1.693 | 1.000 | 1.000 |
| 4 | 128 | 278576 | 463144 | 1.663 | 3.348 | 3.287 |
| 8 | 128 | 519952 | 822024 | 1.581 | 6.248 | 5.834 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
##### Training throughput: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `train.py` training script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs.
Throughput performance numbers (in tokens per second) were averaged over an
entire training epoch.
| **GPUs** | **Batch size / GPU** | **Throughput - FP32 (tok/s)** | **Throughput - Mixed precision (tok/s)** | **Throughput speedup (FP32 to Mixed precision)** | **Strong Scaling - FP32** | **Strong Scaling - Mixed precision** |
| --- | --- | ------ | ------ | ----- | ----- | ----- |
| 1 | 128 | 21860 | 76438 | 3.497 | 1.000 | 1.000 |
| 4 | 128 | 80224 | 249168 | 3.106 | 3.670 | 3.260 |
| 8 | 128 | 154168 | 447832 | 2.905 | 7.053 | 5.859 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
##### Training throughput: NVIDIA DGX-2H (16x V100 32GB)
Our results were obtained by running the `train.py` training script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX-2H with 16x V100 32GB GPUs.
Throughput performance numbers (in tokens per second) were averaged over an
entire training epoch.
| **GPUs** | **Batch size / GPU** | **Throughput - FP32 (tok/s)** | **Throughput - Mixed precision (tok/s)** | **Throughput speedup (FP32 to Mixed precision)** | **Strong Scaling - FP32** | **Strong Scaling - Mixed precision** |
| --- | --- | ------ | ------ | ----- | ------ | ------ |
| 1 | 128 | 25583 | 87829 | 3.433 | 1.000 | 1.000 |
| 4 | 128 | 91400 | 290640 | 3.180 | 3.573 | 3.309 |
| 8 | 128 | 176616 | 522008 | 2.956 | 6.904 | 5.943 |
| 16 | 128 | 351792 | 1010880 | 2.874 | 13.751 | 11.510 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
#### Inference accuracy results
##### Inference accuracy: NVIDIA A100 40GB
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA A100 40GB GPU. Full
command to launch the inference accuracy benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
| **Batch Size** | **Beam Size** | **Accuracy - TF32 (BLEU)** | **Accuracy - FP16 (BLEU)** |
| -------------: | ------------: | -------------------------: | -------------------------: |
| 128 | 1 | 23.07 | 23.07 |
| 128 | 2 | 23.81 | 23.81 |
| 128 | 5 | 24.41 | 24.43 |
##### Inference accuracy: NVIDIA Tesla V100 16GB
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA Tesla V100 16GB GPU. Full
command to launch the inference accuracy benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
| **Batch Size** | **Beam Size** | **Accuracy - FP32 (BLEU)** | **Accuracy - FP16 (BLEU)** |
| -------------: | ------------: | -------------------------: | -------------------------: |
| 128 | 1 | 23.07 | 23.07 |
| 128 | 2 | 23.81 | 23.79 |
| 128 | 5 | 24.40 | 24.43 |
##### Inference accuracy: NVIDIA T4
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA Tesla T4. Full command to
launch the inference accuracy benchmark was provided in the [Inference
performance benchmark](#inference-performance-benchmark) section.
| **Batch Size** | **Beam Size** | **Accuracy - FP32 (BLEU)** | **Accuracy - FP16 (BLEU)** |
| -------------: | ------------: | -------------------------: | -------------------------: |
| 128 | 1 | 23.07 | 23.08 |
| 128 | 2 | 23.81 | 23.80 |
| 128 | 5 | 24.40 | 24.39 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
#### Inference throughput results
Tables presented in this section show the average inference throughput (columns
**Avg (tok/s)**) and inference throughput for various confidence intervals
(columns **N% (tok/s)**, where `N` denotes the confidence interval). Inference
throughput is measured in tokens per second. Speedups reported in FP16
subsections are relative to FP32 (for NVIDIA Volta and NVIDIA Turing) and
relative to TF32 (for NVIDIA Ampere) numbers for the corresponding configuration.
##### Inference throughput: NVIDIA A100 40GB
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA A100 40GB.
Full command to launch the inference throughput benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16**
|**Batch Size**|**Beam Size**|**Avg (tok/s)**|**Speedup**|**90% (tok/s)**|**Speedup**|**95% (tok/s)**|**Speedup**|**99% (tok/s)**|**Speedup**|
|-------------:|------------:|--------------:|----------:|--------------:|----------:|--------------:|----------:|--------------:|----------:|
| 1| 1| 1291.6| 1.031| 1195.7| 1.029| 1165.8| 1.029| 1104.7| 1.030|
| 1| 2| 882.7| 1.019| 803.4| 1.015| 769.2| 1.015| 696.7| 1.017|
| 1| 5| 848.3| 1.042| 753.0| 1.037| 715.0| 1.043| 636.4| 1.033|
| 2| 1| 2060.5| 1.034| 1700.8| 1.032| 1621.8| 1.032| 1487.4| 1.022|
| 2| 2| 1445.7| 1.026| 1197.6| 1.024| 1132.5| 1.023| 1043.7| 1.033|
| 2| 5| 1402.3| 1.063| 1152.4| 1.056| 1100.5| 1.053| 992.9| 1.053|
| 4| 1| 3465.6| 1.046| 2838.3| 1.040| 2672.7| 1.043| 2392.8| 1.043|
| 4| 2| 2425.4| 1.041| 2002.5| 1.028| 1898.3| 1.033| 1690.2| 1.028|
| 4| 5| 2364.4| 1.075| 1930.0| 1.067| 1822.0| 1.065| 1626.1| 1.058|
| 8| 1| 6151.1| 1.099| 5078.0| 1.087| 4786.5| 1.096| 4206.9| 1.090|
| 8| 2| 4241.9| 1.075| 3494.1| 1.066| 3293.6| 1.066| 2970.9| 1.064|
| 8| 5| 4117.7| 1.118| 3430.9| 1.103| 3224.5| 1.104| 2833.5| 1.110|
| 32| 1| 18830.4| 1.147| 16210.0| 1.152| 15563.9| 1.138| 13973.2| 1.135|
| 32| 2| 12698.2| 1.133| 10812.3| 1.114| 10256.1| 1.145| 9330.2| 1.101|
| 32| 5| 11802.6| 1.355| 9998.8| 1.318| 9671.6| 1.329| 9058.4| 1.335|
| 128| 1| 53394.5| 1.350| 48867.6| 1.342| 46898.5| 1.414| 40670.6| 1.305|
| 128| 2| 34876.4| 1.483| 31687.4| 1.491| 30025.4| 1.505| 27677.1| 1.421|
| 128| 5| 28201.3| 1.986| 25660.5| 1.997| 24306.0| 1.967| 23326.2| 2.007|
| 512| 1| 119675.3| 1.904| 112400.5| 1.971| 109694.8| 1.927| 108781.3| 1.919|
| 512| 2| 74514.7| 2.126| 69578.9| 2.209| 69348.1| 2.210| 69253.7| 2.212|
| 512| 5| 47003.2| 2.760| 43348.2| 2.893| 43080.3| 2.884| 42878.4| 2.881|
##### Inference throughput: NVIDIA T4
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA T4.
Full command to launch the inference throughput benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16**
|**Batch Size**|**Beam Size**|**Avg (tok/s)**|**Speedup**|**90% (tok/s)**|**Speedup**|**95% (tok/s)**|**Speedup**|**99% (tok/s)**|**Speedup**|
|-------------:|------------:|--------------:|----------:|--------------:|----------:|--------------:|----------:|--------------:|----------:|
| 1| 1| 1133.8| 1.266| 1059.1| 1.253| 1036.6| 1.251| 989.5| 1.242|
| 1| 2| 793.9| 1.169| 728.3| 1.165| 698.1| 1.163| 637.1| 1.157|
| 1| 5| 766.8| 1.343| 685.6| 1.335| 649.3| 1.335| 584.1| 1.318|
| 2| 1| 1759.8| 1.233| 1461.6| 1.239| 1402.3| 1.242| 1302.1| 1.242|
| 2| 2| 1313.3| 1.186| 1088.7| 1.185| 1031.6| 1.180| 953.2| 1.178|
| 2| 5| 1257.2| 1.301| 1034.1| 1.316| 990.3| 1.313| 886.3| 1.265|
| 4| 1| 2974.0| 1.261| 2440.3| 1.255| 2294.6| 1.257| 2087.7| 1.261|
| 4| 2| 2204.7| 1.320| 1826.3| 1.283| 1718.9| 1.260| 1548.4| 1.260|
| 4| 5| 2106.1| 1.340| 1727.8| 1.345| 1625.7| 1.353| 1467.7| 1.346|
| 8| 1| 5076.6| 1.423| 4207.9| 1.367| 3904.4| 1.360| 3475.3| 1.355|
| 8| 2| 3761.7| 1.311| 3108.1| 1.285| 2931.6| 1.300| 2628.7| 1.300|
| 8| 5| 3578.2| 1.660| 2998.2| 1.614| 2812.1| 1.609| 2447.6| 1.523|
| 32| 1| 14637.8| 1.636| 12702.5| 1.644| 12070.3| 1.634| 11036.9| 1.647|
| 32| 2| 10627.3| 1.818| 9198.3| 1.818| 8431.6| 1.725| 8000.0| 1.773|
| 32| 5| 8205.7| 2.598| 7117.6| 2.476| 6825.2| 2.497| 6293.2| 2.437|
| 128| 1| 33800.5| 2.755| 30824.5| 2.816| 27685.2| 2.661| 26580.9| 2.694|
| 128| 2| 20829.4| 2.795| 18665.2| 2.778| 17372.1| 2.639| 16820.5| 2.821|
| 128| 5| 11753.9| 3.309| 10658.1| 3.273| 10308.7| 3.205| 9630.7| 3.328|
| 512| 1| 44474.6| 3.327| 40108.1| 3.394| 39816.6| 3.378| 39708.0| 3.381|
| 512| 2| 26057.9| 3.295| 23197.3| 3.294| 23019.8| 3.284| 22951.4| 3.284|
| 512| 5| 12161.5| 3.428| 10777.5| 3.418| 10733.1| 3.414| 10710.5| 3.420|
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
#### Inference latency results
Tables presented in this section show the average inference latency (columns **Avg
(ms)**) and inference latency for various confidence intervals (columns **N%
(ms)**, where `N` denotes the confidence interval). Inference latency is
measured in milliseconds. Speedups reported in FP16 subsections are relative to
FP32 (for NVIDIA Volta and NVIDIA Turing) and relative to TF32 (for NVIDIA
Ampere) numbers for the corresponding configuration.
##### Inference latency: NVIDIA A100 40GB
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA A100 40GB.
Full command to launch the inference latency benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16**
|**Batch Size**|**Beam Size**|**Avg (ms)**|**Speedup**|**90% (ms)**|**Speedup**|**95% (ms)**|**Speedup**|**99% (ms)**|**Speedup**|
|-------------:|------------:|-----------:|----------:|-----------:|----------:|-----------:|----------:|-----------:|----------:|
| 1| 1| 44.69| 1.032| 74.04| 1.035| 84.61| 1.034| 99.14| 1.042|
| 1| 2| 64.76| 1.020| 105.18| 1.018| 118.92| 1.019| 139.42| 1.023|
| 1| 5| 67.06| 1.043| 107.56| 1.049| 121.82| 1.054| 143.85| 1.054|
| 2| 1| 56.57| 1.034| 85.59| 1.037| 92.55| 1.038| 107.59| 1.046|
| 2| 2| 80.22| 1.027| 119.22| 1.027| 128.43| 1.030| 150.06| 1.028|
| 2| 5| 82.54| 1.063| 121.37| 1.067| 132.35| 1.069| 156.34| 1.059|
| 4| 1| 67.29| 1.047| 92.69| 1.048| 100.08| 1.056| 112.63| 1.064|
| 4| 2| 95.86| 1.041| 129.83| 1.040| 139.48| 1.044| 162.34| 1.045|
| 4| 5| 98.34| 1.075| 133.83| 1.076| 142.70| 1.068| 168.30| 1.075|
| 8| 1| 75.60| 1.099| 97.87| 1.103| 104.13| 1.099| 117.40| 1.102|
| 8| 2| 109.38| 1.074| 137.71| 1.079| 147.69| 1.069| 168.79| 1.065|
| 8| 5| 112.71| 1.116| 143.50| 1.104| 153.17| 1.118| 172.60| 1.113|
| 32| 1| 98.40| 1.146| 117.02| 1.153| 123.42| 1.150| 129.01| 1.128|
| 32| 2| 145.87| 1.133| 171.71| 1.159| 184.01| 1.127| 188.64| 1.141|
| 32| 5| 156.82| 1.357| 189.10| 1.374| 194.95| 1.392| 196.65| 1.419|
| 128| 1| 137.97| 1.350| 150.04| 1.348| 151.52| 1.349| 154.52| 1.434|
| 128| 2| 211.58| 1.484| 232.96| 1.490| 237.46| 1.505| 239.86| 1.567|
| 128| 5| 261.44| 1.990| 288.54| 2.017| 291.63| 2.052| 298.73| 2.136|
| 512| 1| 245.93| 1.906| 262.51| 1.998| 264.24| 1.999| 265.23| 2.000|
| 512| 2| 395.61| 2.129| 428.54| 2.219| 431.58| 2.224| 433.86| 2.227|
| 512| 5| 627.21| 2.767| 691.72| 2.878| 696.01| 2.895| 702.13| 2.887|
##### Inference latency: NVIDIA T4
Our results were obtained by running the `translate.py` script in the
pytorch-20.06-py3 NGC Docker container with NVIDIA T4.
Full command to launch the inference latency benchmark was provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16**
|**Batch Size**|**Beam Size**|**Avg (ms)**|**Speedup**|**90% (ms)**|**Speedup**|**95% (ms)**|**Speedup**|**99% (ms)**|**Speedup**|
|-------------:|------------:|-----------:|----------:|-----------:|----------:|-----------:|----------:|-----------:|----------:|
| 1| 1| 51.08| 1.261| 84.82| 1.254| 97.45| 1.251| 114.6| 1.257|
| 1| 2| 72.05| 1.168| 117.41| 1.165| 132.33| 1.170| 155.8| 1.174|
| 1| 5| 74.20| 1.345| 119.45| 1.352| 135.07| 1.354| 160.3| 1.354|
| 2| 1| 66.31| 1.232| 100.90| 1.232| 108.52| 1.235| 126.9| 1.238|
| 2| 2| 88.35| 1.185| 131.47| 1.188| 141.46| 1.185| 164.7| 1.191|
| 2| 5| 92.12| 1.305| 136.30| 1.310| 148.66| 1.309| 174.8| 1.320|
| 4| 1| 78.54| 1.260| 108.53| 1.256| 117.19| 1.259| 133.7| 1.259|
| 4| 2| 105.54| 1.315| 142.74| 1.317| 154.36| 1.307| 178.7| 1.303|
| 4| 5| 110.43| 1.351| 150.62| 1.388| 161.61| 1.397| 191.2| 1.427|
| 8| 1| 91.65| 1.418| 117.92| 1.421| 126.60| 1.405| 144.0| 1.411|
| 8| 2| 123.39| 1.315| 156.00| 1.337| 167.34| 1.347| 193.4| 1.340|
| 8| 5| 129.69| 1.666| 165.01| 1.705| 178.18| 1.723| 200.3| 1.765|
| 32| 1| 126.53| 1.641| 153.23| 1.689| 159.58| 1.692| 167.0| 1.700|
| 32| 2| 174.37| 1.822| 209.04| 1.899| 219.59| 1.877| 228.6| 1.878|
| 32| 5| 226.15| 2.598| 277.38| 2.636| 290.27| 2.648| 299.4| 2.664|
| 128| 1| 218.29| 2.755| 238.94| 2.826| 243.18| 2.843| 267.1| 2.828|
| 128| 2| 354.83| 2.796| 396.63| 2.832| 410.53| 2.803| 433.2| 2.866|
| 128| 5| 628.32| 3.311| 699.57| 3.353| 723.98| 3.323| 771.0| 3.337|
| 512| 1| 663.07| 3.330| 748.62| 3.388| 753.20| 3.388| 758.0| 3.378|
| 512| 2| 1134.04| 3.295| 1297.85| 3.283| 1302.25| 3.304| 1306.9| 3.308|
| 512| 5| 2428.82| 3.428| 2771.72| 3.415| 2801.32| 3.427| 2817.6| 3.422|
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
## Release notes
### Changelog
* July 2020
* Added support for NVIDIA DGX A100
* Default container updated to NGC PyTorch 20.06-py3
* June 2019
* Default container updated to NGC PyTorch 19.05-py3
* Mixed precision training implemented using APEX AMP
* Added inference throughput and latency results on NVIDIA T4 and NVIDIA
Tesla V100 16GB
* Added option to run inference on user-provided raw input text from command
line
* February 2019
* Different batching algorithm (bucketing with 5 equal-width buckets)
* Additional dropouts before first LSTM layer in encoder and in decoder
* Weight initialization changed to uniform (-0.1,0.1)
* Switched order of dropout and concatenation with attention in decoder
* Default container updated to NGC PyTorch 19.01-py3
* December 2018
* Added exponential warm-up and step learning rate decay
* Multi-GPU (distributed) inference and validation
* Default container updated to NGC PyTorch 18.11-py3
* General performance improvements
* August 2018
* Initial release
### Known issues
There are no known issues in this release.
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit/bermuda | bermuda | onnx | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
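# For illustration (hypothetical tensor): a float32 input of shape
# (batch, 128) with a symbolic batch dimension converts to
# TensorSpec('input', dtype='float32', shape=(None, 128)), because _get_dim
# maps string-valued (dynamic) dimensions to None.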
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
    for node in onnx_graph.node:
        # Note: the joined key only matches single-output nodes; multi-output
        # nodes fall back to dtype=None.
        node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
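# Example usage (a hedged sketch; the model path is a placeholder and, in the
# deployment toolkit, these classes are normally resolved through the
# extension registries registered above):
#
#   loader = OnnxLoader()
#   model = loader.load('model.onnx')
#   runner = OnnxRunner()
#   with runner.init_inference(model) as session:
#       y_pred = session(inputs)  # inputs: Dict[str, np.ndarray]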
|
TensorFlow/Classification/ConvNets/resnext101-32x4d/training | training | training_perf | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAX_FP32_BS=${1:-64}
MAX_AMP_BS=${2:-128}
GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | uniq)
GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l)
function run_benchmark() {
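    # Args: $1 = batch size, $2 = precision mode (amp|fp32|tf32),
    # $3 = number of GPUs (1 runs without mpiexec), $4 = XLA flag (1 enables --xla).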
BATCH_SIZE=$1
MODE_SIZE=$2
if [[ $4 -eq "1" ]]; then
XLA="--xla"
else
XLA=""
fi
case $2 in
"amp") MODE_FLAGS="--amp --static_loss_scale 128";;
"fp32"|"tf32") MODE_FLAGS="";;
*) echo "Unsupported configuration, use amp, tf32 or fp32";;
esac
CMD_LINE="--arch=resnext101-32x4d --mode=training_benchmark --warmup_steps 200 --num_iter 500 --iter_unit batch --batch_size $BATCH_SIZE \
--data_dir=/data/tfrecords/ --results_dir=/tmp/result $MODE_FLAGS $XLA"
mkdir -p /tmp/result/
if [[ $3 -eq "1" ]]; then
python ./main.py ${CMD_LINE} > /tmp/result/logs.txt
else
mpiexec --allow-run-as-root --bind-to socket -np $3 python3 main.py ${CMD_LINE} > /tmp/result/logs.txt
fi
tail -n1 /tmp/result/logs.txt | sed \
's/^DLL \([0-9]*-\)*[0-9]* \([0-9]*:\)*[0-9]*.[0-9]* - ()/BS='$BATCH_SIZE','$2',XLA='$4'/' >> ./training_benchmark.txt
rm -rf /tmp/result
}
run_benchmark $MAX_AMP_BS amp 1 0
run_benchmark $MAX_AMP_BS amp 1 1
run_benchmark $MAX_FP32_BS fp32 1 0
run_benchmark $MAX_FP32_BS fp32 1 1
if [[ $GPU_COUNT -ne "1" ]]; then
run_benchmark $MAX_AMP_BS amp $GPU_COUNT 0
run_benchmark $MAX_AMP_BS amp $GPU_COUNT 1
run_benchmark $MAX_FP32_BS fp32 $GPU_COUNT 0
run_benchmark $MAX_FP32_BS fp32 $GPU_COUNT 1
fi
cat ./training_benchmark.txt |
PyTorch/SpeechRecognition/Jasper/common/text | text | cleaners | # Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add punctuation removal
"""
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
    # Transliterates the input to ASCII using the unidecoder helper.
    return unidecoder(text)
def remove_punctuation(text, table):
text = text.translate(table)
text = re.sub(r'&', " and ", text)
text = re.sub(r'\+', " plus ", text)
return text
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text, table=None):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
if table is not None:
text = remove_punctuation(text, table)
text = collapse_whitespace(text)
return text
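# Example usage (illustrative only): a punctuation-deletion table built with
# str.maketrans can be passed to english_cleaners to strip punctuation.
#
#   import string
#   table = str.maketrans('', '', string.punctuation)
#   cleaned = english_cleaners("Dr. Smith arrived at 8 o'clock.", table=table)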
|
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/FP32 | FP32 | train_benchmark_8xV100-32G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v2/s_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output/ \
--data_dir /data/ \
--log_steps 500 \
--save_checkpoint_freq 10 \
--n_stages 1 \
--max_epochs 3 \
--steps_per_epoch 2000 \
--train_batch_size 64 \
--train_img_size 300 \
--lr_decay cosine \
--lr_init 0.005 \
--weight_decay .000005 \
--opt_epsilon 0.001 \
--moving_average_decay 0.9999 \
--eval_img_size 384 \
--eval_batch_size 128 \
--augmenter_name randaugment \
--raug_num_layers 2 \
--raug_magnitude 15 \
--cutmix_alpha 0 \
--mixup_alpha 0 \
--defer_img_mixing |
TensorFlow/LanguageModeling/Transformer-XL/tf | tf | model | import tensorflow as tf
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = tf.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
if bsz is not None:
return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
else:
return pos_emb[:, None, :]
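# Shape note (comment only): for a pos_seq of length klen, the concatenated
# [sin, cos] features give pos_emb of shape [klen, 1, d_model] (or
# [klen, bsz, d_model] when bsz is given); sines occupy the first half of the
# feature axis and cosines the second.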
def positionwise_FF(inp, d_model, d_inner, dropout, kernel_initializer,
scope='ff', is_training=True):
output = inp
with tf.variable_scope(scope):
output = tf.layers.dense(inp, d_inner, activation=tf.nn.relu,
kernel_initializer=kernel_initializer,
name='layer_1')
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_1')
output = tf.layers.dense(output, d_model,
kernel_initializer=kernel_initializer,
name='layer_2')
output = tf.layers.dropout(output, dropout, training=is_training,
name='drop_2')
output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1)
return output
def rel_shift(x):
x_size = tf.shape(x)
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = tf.reshape(x, [x_size[0], x_size[1], x_size[3] + 1, x_size[2]])
x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
x = tf.reshape(x, x_size)
return x
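# Comment-only note: the pad/reshape/slice sequence above is the
# Transformer-XL "relative shift" trick. Prepending a zero column and
# reinterpreting the buffer shifts row i of each [qlen, rlen] score matrix
# left by (qlen - 1 - i) positions, so that after the shift column j indexes
# the relative distance between query i and key j rather than the reversed
# absolute position, without materializing an explicit gather index.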
def rel_multihead_attn(w, r, r_w_bias, r_r_bias, attn_mask, mems, d_model,
n_head, d_head, dropout, dropatt, is_training,
kernel_initializer, scope='rel_attn'):
scale = 1 / (d_head ** 0.5)
with tf.variable_scope(scope):
qlen = tf.shape(w)[0]
rlen = tf.shape(r)[0]
bsz = tf.shape(w)[1]
cat = tf.concat([mems, w],
0) if mems is not None and mems.shape.ndims > 1 else w
w_heads = tf.layers.dense(cat, 3 * n_head * d_head, use_bias=False,
kernel_initializer=kernel_initializer, name='qkv')
r_head_k = tf.layers.dense(r, n_head * d_head, use_bias=False,
kernel_initializer=kernel_initializer, name='r')
w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, -1)
w_head_q = w_head_q[-qlen:]
klen = tf.shape(w_head_k)[0]
w_head_q = tf.reshape(w_head_q, [qlen, bsz, n_head, d_head])
w_head_k = tf.reshape(w_head_k, [klen, bsz, n_head, d_head])
w_head_v = tf.reshape(w_head_v, [klen, bsz, n_head, d_head])
r_head_k = tf.reshape(r_head_k, [rlen, n_head, d_head])
rw_head_q = w_head_q + r_w_bias
rr_head_q = w_head_q + r_r_bias
AC = tf.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k)
BD = tf.einsum('ibnd,jnd->bnij', rr_head_q, r_head_k)
BD = rel_shift(BD)
attn_score = (AC + BD) * scale
attn_mask_t = attn_mask[None, None, :, :]
attn_score = attn_score * (1 - attn_mask_t) - 1e30 * attn_mask_t
attn_prob = tf.nn.softmax(attn_score, 3)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v)
size_t = tf.shape(attn_vec)
attn_vec = tf.reshape(attn_vec, [size_t[0], size_t[1], n_head * d_head])
attn_out = tf.layers.dense(attn_vec, d_model, use_bias=False,
kernel_initializer=kernel_initializer, name='o')
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
output = tf.contrib.layers.layer_norm(attn_out + w, begin_norm_axis=-1)
return output
def embedding_lookup(lookup_table, x, use_tpu=True):
if use_tpu:
n_token = tf.shape(lookup_table)[0]
one_hot_idx = tf.one_hot(x, n_token)
if one_hot_idx.shape.ndims == 2:
return tf.einsum('nd,in->id', lookup_table, one_hot_idx)
else:
return tf.einsum('nd,ibn->ibd', lookup_table, one_hot_idx)
else:
return tf.nn.embedding_lookup(lookup_table, x)
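# On TPU, gather is emulated with a one-hot matmul: for token ids of shape
# [len, bsz], tf.one_hot produces [len, bsz, n_token] and the einsum
# 'nd,ibn->ibd' contracts the vocabulary axis against the [n_token, d_embed]
# table; this is mathematically identical to tf.nn.embedding_lookup but maps
# to dense matrix units instead of a sparse gather.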
def mask_adaptive_embedding_lookup(x, n_token, d_embed, d_proj, cutoffs, initializer,
proj_initializer, div_val=1,
proj_same_dim=True,
scope='adaptive_embed', **kwargs):
emb_scale = d_proj ** 0.5
with tf.variable_scope(scope):
if div_val == 1:
lookup_table = tf.get_variable('lookup_table', [n_token, d_embed],
initializer=initializer)
y = embedding_lookup(lookup_table, x, use_tpu=False)
if d_proj != d_embed:
proj_W = tf.get_variable('proj_W', [d_embed, d_proj],
initializer=proj_initializer)
y = tf.einsum('ibe,ed->ibd', y, proj_W)
else:
proj_W = None
ret_params = [lookup_table, proj_W]
else:
tables, projs = [], []
cutoff_ends = [0] + cutoffs + [n_token]
x_size = tf.shape(x)
y = tf.zeros([x_size[0], x_size[1], d_proj])
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
mask = (x >= l_idx) & (x < r_idx)
cur_x = tf.boolean_mask(x, mask) - l_idx
cur_d_embed = d_embed // (div_val ** i)
lookup_table = tf.get_variable('lookup_table',
[r_idx - l_idx, cur_d_embed],
initializer=initializer)
cur_y = embedding_lookup(lookup_table, cur_x, use_tpu=False)
if d_proj == cur_d_embed and not proj_same_dim:
proj_W = None
else:
proj_W = tf.get_variable('proj_W', [cur_d_embed, d_proj],
initializer=proj_initializer)
cur_y = tf.einsum('id,de->ie', cur_y, proj_W)
mask_idx = tf.to_int64(tf.where(mask))
y += tf.scatter_nd(mask_idx, cur_y, tf.to_int64(tf.shape(y)))
tables.append(lookup_table)
projs.append(proj_W)
ret_params = [tables, projs]
y *= emb_scale
return y, ret_params
def mul_adaptive_embedding_lookup(x, n_token, d_embed, d_proj, cutoffs, initializer,
proj_initializer, div_val=1, perms=None,
proj_same_dim=True,
scope='adaptive_embed'):
"""
perms: If None, first compute W = W1 x W2 (projection for each bin),
and then compute X x W (embedding lookup). If not None,
use bin-based embedding lookup with max_bin_size defined by
the shape of perms.
"""
emb_scale = d_proj ** 0.5
with tf.variable_scope(scope):
if div_val == 1:
lookup_table = tf.get_variable('lookup_table', [n_token, d_embed],
initializer=initializer)
y = embedding_lookup(lookup_table, x)
if d_proj != d_embed:
proj_W = tf.get_variable('proj_W', [d_embed, d_proj],
initializer=proj_initializer)
y = tf.einsum('ibe,ed->ibd', y, proj_W)
else:
proj_W = None
ret_params = [lookup_table, proj_W]
else:
tables, projs = [], []
cutoff_ends = [0] + cutoffs + [n_token]
x_size = tf.shape(x)
if perms is None:
cat_lookup = []
else:
cat_lookup = tf.zeros([x_size[0], x_size[1], d_proj])
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
cur_d_embed = d_embed // (div_val ** i)
lookup_table = tf.get_variable('lookup_table',
[r_idx - l_idx, cur_d_embed],
initializer=initializer)
if cur_d_embed == d_proj and not proj_same_dim:
proj_W = None
else:
proj_W = tf.get_variable('proj_W', [cur_d_embed, d_proj],
initializer=proj_initializer)
if perms is None:
cat_lookup.append(tf.einsum('ie,ed->id', lookup_table, proj_W))
else:
# speed up the computation of the first bin
# also save some memory
if i == 0:
cur_y = embedding_lookup(lookup_table, tf.minimum(x, r_idx - 1))
if proj_W is not None:
cur_y = tf.einsum('ibe,ed->ibd', cur_y, proj_W)
cur_y *= perms[i][:, :, None]
cat_lookup += cur_y
else:
cur_x = tf.einsum('ib,ibk->k', tf.to_float(x - l_idx), perms[i])
cur_x = tf.to_int32(cur_x)
cur_y = embedding_lookup(lookup_table, cur_x)
if proj_W is not None:
cur_y = tf.einsum('ke,ed->kd', cur_y, proj_W)
cat_lookup += tf.einsum('kd,ibk->ibd', cur_y, perms[i])
tables.append(lookup_table)
projs.append(proj_W)
if perms is None:
cat_lookup = tf.concat(cat_lookup, 0)
y = embedding_lookup(cat_lookup, x)
else:
y = cat_lookup
ret_params = [tables, projs]
y *= emb_scale
return y, ret_params
def mask_adaptive_logsoftmax(hidden, target, n_token, d_embed, d_proj, cutoffs,
params, tie_projs,
initializer=None, proj_initializer=None,
div_val=1, scope='adaptive_softmax',
proj_same_dim=True,
return_mean=True, **kwargs):
def _logit(x, W, b, proj):
y = x
if proj is not None:
y = tf.einsum('ibd,ed->ibe', y, proj)
return tf.einsum('ibd,nd->ibn', y, W) + b
params_W, params_projs = params[0], params[1]
def _gather_logprob(logprob, target):
lp_size = tf.shape(logprob)
r = tf.range(lp_size[0])
idx = tf.stack([r, target], 1)
return tf.gather_nd(logprob, idx)
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable('bias', [n_token],
initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target,
logits=output)
else:
cutoff_ends = [0] + cutoffs + [n_token]
nll = tf.zeros_like(target, dtype=tf.float32)
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask)
cur_target = tf.boolean_mask(target, mask) - l_idx
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx: r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable('b', [r_idx - l_idx],
initializer=tf.zeros_initializer())
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable('proj', [cur_d_embed, d_proj],
initializer=proj_initializer)
if i == 0:
cluster_W = tf.get_variable('cluster_W', [len(cutoffs), d_embed],
initializer=tf.zeros_initializer())
cluster_b = tf.get_variable('cluster_b', [len(cutoffs)],
initializer=tf.zeros_initializer())
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_logprob = tf.nn.log_softmax(head_logit)
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_logprob = _gather_logprob(cur_head_logprob, cur_target)
else:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_hidden = tf.boolean_mask(hidden, mask)
tail_logit = tf.squeeze(_logit(
cur_hidden[None], cur_W, cur_b, cur_proj), 0)
tail_logprob = tf.nn.log_softmax(tail_logit)
cur_logprob = (cur_head_logprob[:, cutoff_ends[1] + i - 1] +
_gather_logprob(tail_logprob, cur_target))
nll += tf.scatter_nd(mask_idx, -cur_logprob,
tf.to_int64(tf.shape(nll)))
if return_mean:
nll = tf.reduce_mean(nll)
return nll
def mul_adaptive_logsoftmax(hidden, target, n_token, d_embed, d_proj, cutoffs,
params, tie_projs,
initializer=None, proj_initializer=None,
div_val=1, perms=None, proj_same_dim=True,
scope='adaptive_softmax',
**kwargs):
def _logit(x, W, b, proj):
y = x
if x.shape.ndims == 3:
if proj is not None:
y = tf.einsum('ibd,ed->ibe', y, proj)
return tf.einsum('ibd,nd->ibn', y, W) + b
else:
if proj is not None:
y = tf.einsum('id,ed->ie', y, proj)
return tf.einsum('id,nd->in', y, W) + b
params_W, params_projs = params[0], params[1]
with tf.variable_scope(scope):
if len(cutoffs) == 0:
softmax_b = tf.get_variable('bias', [n_token],
initializer=tf.zeros_initializer())
output = _logit(hidden, params_W, softmax_b, params_projs)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target,
logits=output)
nll = tf.reduce_mean(nll)
else:
total_loss, total_cnt = 0, 0
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
with tf.variable_scope('cutoff_{}'.format(i)):
l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
cur_d_embed = d_embed // (div_val ** i)
if div_val == 1:
cur_W = params_W[l_idx: r_idx]
else:
cur_W = params_W[i]
cur_b = tf.get_variable('b', [r_idx - l_idx],
initializer=tf.zeros_initializer())
if tie_projs[i]:
if div_val == 1:
cur_proj = params_projs
else:
cur_proj = params_projs[i]
else:
if (div_val == 1 or not proj_same_dim) and d_proj == cur_d_embed:
cur_proj = None
else:
cur_proj = tf.get_variable('proj', [cur_d_embed, d_proj],
initializer=proj_initializer)
if i == 0:
cluster_W = tf.get_variable('cluster_W', [len(cutoffs), d_embed],
initializer=tf.zeros_initializer())
cluster_b = tf.get_variable('cluster_b', [len(cutoffs)],
initializer=tf.zeros_initializer())
cur_W = tf.concat([cur_W, cluster_W], 0)
cur_b = tf.concat([cur_b, cluster_b], 0)
head_logit = _logit(hidden, cur_W, cur_b, cur_proj)
head_target = kwargs.get("head_target")
head_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=head_target,
logits=head_logit)
masked_loss = head_nll * perms[i]
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(perms[i])
else:
cur_head_nll = tf.einsum('ib,ibk->k', head_nll, perms[i])
cur_hidden = tf.einsum('ibd,ibk->kd', hidden, perms[i])
tail_logit = _logit(cur_hidden, cur_W, cur_b, cur_proj)
tail_target = tf.einsum('ib,ibk->k', tf.to_float(target - l_idx),
perms[i])
tail_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.to_int32(tail_target),
logits=tail_logit)
sum_nll = cur_head_nll + tail_nll
mask = tf.reduce_sum(perms[i], [0, 1])
masked_loss = sum_nll * mask
total_loss += tf.reduce_sum(masked_loss)
total_cnt += tf.reduce_sum(mask)
nll = total_loss / total_cnt
return nll
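# Note (added comment, not in the original file): _create_mask builds the
# causal attention mask for qlen query positions attending over mlen cached
# plus qlen current positions. Entries equal to 1 are masked out (future
# tokens); entries equal to 0 are attendable. With same_length=True, tokens
# too far in the past are also masked so that every query position attends
# over a window of the same length.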
def _create_mask(qlen, mlen, same_length=False):
attn_mask = tf.ones([qlen, qlen])
mask_u = tf.matrix_band_part(attn_mask, 0, -1)
mask_dia = tf.matrix_band_part(attn_mask, 0, 0)
attn_mask_pad = tf.zeros([qlen, mlen])
ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
if same_length:
mask_l = tf.matrix_band_part(attn_mask, -1, 0)
ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
return ret
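# Note (added comment, not in the original file): _cache_mem updates the
# segment-level recurrence memory. The new layer output is concatenated with
# the previous memory and only the last mem_len steps are kept, wrapped in
# stop_gradient so no gradients flow into cached segments (mem_len == 0
# returns the previous memory untouched).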
def _cache_mem(curr_out, prev_mem, mem_len=None):
if mem_len is None or prev_mem is None:
new_mem = curr_out
elif mem_len == 0:
return prev_mem
else:
new_mem = tf.concat([prev_mem, curr_out], 0)[- mem_len:]
return tf.stop_gradient(new_mem)
def transformer(dec_inp, target, mems, n_token, n_layer, d_model, d_embed,
n_head, d_head, d_inner, dropout, dropatt,
initializer, is_training, proj_initializer=None,
mem_len=None, cutoffs=[], div_val=1, tie_projs=[],
same_length=False, clamp_len=-1, use_tpu=False,
input_perms=None, target_perms=None, head_target=None,
untie_r=False, proj_same_dim=True,
scope='transformer'):
"""
cutoffs: a list of python int. Cutoffs for adaptive softmax.
tie_projs: a list of python bools. Whether to tie the projections.
use_tpu: if True, use one_hot in embedding lookup and bin-based implementation
of adaptive softmax.
  perms: a list of tensors. Each tensor should be of size [len, bsz, bin_size].
Only used in the adaptive setting.
"""
new_mems = []
with tf.variable_scope(scope):
if untie_r:
r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head],
initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head],
initializer=initializer)
else:
r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head],
initializer=initializer)
r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head],
initializer=initializer)
qlen = tf.shape(dec_inp)[0]
mlen = tf.shape(mems[0])[0] if mems is not None else 0
klen = mlen + qlen
if proj_initializer is None:
proj_initializer = initializer
lookup_fn = (mul_adaptive_embedding_lookup if use_tpu else
mask_adaptive_embedding_lookup)
embeddings, shared_params = lookup_fn(
x=dec_inp,
n_token=n_token,
d_embed=d_embed,
d_proj=d_model,
cutoffs=cutoffs,
initializer=initializer,
proj_initializer=proj_initializer,
        div_val=div_val,
perms=input_perms,
proj_same_dim=proj_same_dim)
attn_mask = _create_mask(qlen, mlen, same_length)
pos_seq = tf.range(klen - 1, -1, -1.0)
if clamp_len > 0:
pos_seq = tf.minimum(pos_seq, clamp_len)
inv_freq = 1 / (10000 ** (tf.range(0, d_model, 2.0) / d_model))
pos_emb = positional_embedding(pos_seq, inv_freq)
output = tf.layers.dropout(embeddings, dropout, training=is_training)
pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training)
if mems is None:
mems = [None] * n_layer
for i in range(n_layer):
# cache new mems
new_mems.append(_cache_mem(output, mems[i], mem_len))
with tf.variable_scope('layer_{}'.format(i)):
output = rel_multihead_attn(
w=output,
r=pos_emb,
r_w_bias=r_w_bias if not untie_r else r_w_bias[i],
r_r_bias=r_r_bias if not untie_r else r_r_bias[i],
attn_mask=attn_mask,
mems=mems[i],
d_model=d_model,
n_head=n_head,
d_head=d_head,
dropout=dropout,
dropatt=dropatt,
is_training=is_training,
kernel_initializer=initializer)
output = positionwise_FF(
inp=output,
d_model=d_model,
d_inner=d_inner,
dropout=dropout,
kernel_initializer=initializer,
is_training=is_training)
output = tf.layers.dropout(output, dropout, training=is_training)
logsoftmax_fn = (mul_adaptive_logsoftmax if use_tpu else
mask_adaptive_logsoftmax)
loss = logsoftmax_fn(
hidden=output,
target=target,
n_token=n_token,
d_embed=d_embed,
d_proj=d_model,
cutoffs=cutoffs,
params=shared_params,
tie_projs=tie_projs,
initializer=initializer,
proj_initializer=proj_initializer,
div_val=div_val,
perms=target_perms,
head_target=head_target,
proj_same_dim=proj_same_dim)
return loss, new_mems
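# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a minimal illustration
# of wiring up transformer() above in TF1 graph mode. All shapes and
# hyperparameters are assumptions chosen for brevity, `_example_build_graph`
# is a hypothetical helper, and the sketch assumes the embedding-lookup and
# attention helpers defined earlier in this file.
def _example_build_graph():
  qlen, bsz, n_token = 16, 2, 1000
  dec_inp = tf.placeholder(tf.int32, [qlen, bsz])   # [len, batch] token ids
  target = tf.placeholder(tf.int32, [qlen, bsz])    # next-token targets
  loss, new_mems = transformer(
      dec_inp=dec_inp, target=target, mems=None,
      n_token=n_token, n_layer=2, d_model=32, d_embed=32,
      n_head=2, d_head=16, d_inner=64, dropout=0.1, dropatt=0.1,
      initializer=tf.random_normal_initializer(stddev=0.02),
      is_training=True, mem_len=16)
  # `loss` is the LM training loss; `new_mems` holds one detached memory
  # tensor per layer to feed back in as `mems` for the next segment.
  return loss, new_mems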
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | loss_function | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from torch import nn
from common.utils import mask_from_lens
from fastpitch.attn_loss_function import AttentionCTCLoss
class FastPitchLoss(nn.Module):
def __init__(self, dur_predictor_loss_scale=1.0,
pitch_predictor_loss_scale=1.0, attn_loss_scale=1.0,
energy_predictor_loss_scale=0.1):
super(FastPitchLoss, self).__init__()
self.dur_predictor_loss_scale = dur_predictor_loss_scale
self.pitch_predictor_loss_scale = pitch_predictor_loss_scale
self.energy_predictor_loss_scale = energy_predictor_loss_scale
self.attn_loss_scale = attn_loss_scale
self.attn_ctc_loss = AttentionCTCLoss()
def forward(self, model_out, targets, is_training=True, meta_agg='mean'):
(mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred, pitch_tgt,
energy_pred, energy_tgt, attn_soft, attn_hard, attn_dur,
attn_logprob) = model_out
(mel_tgt, in_lens, out_lens) = targets
dur_tgt = attn_dur
dur_lens = in_lens
mel_tgt.requires_grad = False
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
dur_mask = mask_from_lens(dur_lens, max_len=dur_tgt.size(1))
log_dur_tgt = torch.log(dur_tgt.float() + 1)
loss_fn = F.mse_loss
dur_pred_loss = loss_fn(log_dur_pred, log_dur_tgt, reduction='none')
dur_pred_loss = (dur_pred_loss * dur_mask).sum() / dur_mask.sum()
ldiff = mel_tgt.size(1) - mel_out.size(1)
mel_out = F.pad(mel_out, (0, 0, 0, ldiff, 0, 0), value=0.0)
mel_mask = mel_tgt.ne(0).float()
loss_fn = F.mse_loss
mel_loss = loss_fn(mel_out, mel_tgt, reduction='none')
mel_loss = (mel_loss * mel_mask).sum() / mel_mask.sum()
ldiff = pitch_tgt.size(2) - pitch_pred.size(2)
pitch_pred = F.pad(pitch_pred, (0, ldiff, 0, 0, 0, 0), value=0.0)
pitch_loss = F.mse_loss(pitch_tgt, pitch_pred, reduction='none')
pitch_loss = (pitch_loss * dur_mask.unsqueeze(1)).sum() / dur_mask.sum()
if energy_pred is not None:
energy_pred = F.pad(energy_pred, (0, ldiff, 0, 0), value=0.0)
energy_loss = F.mse_loss(energy_tgt, energy_pred, reduction='none')
energy_loss = (energy_loss * dur_mask).sum() / dur_mask.sum()
else:
energy_loss = 0
# Attention loss
attn_loss = self.attn_ctc_loss(attn_logprob, in_lens, out_lens)
loss = (mel_loss
+ dur_pred_loss * self.dur_predictor_loss_scale
+ pitch_loss * self.pitch_predictor_loss_scale
+ energy_loss * self.energy_predictor_loss_scale
+ attn_loss * self.attn_loss_scale)
meta = {
'loss': loss.clone().detach(),
'mel_loss': mel_loss.clone().detach(),
'duration_predictor_loss': dur_pred_loss.clone().detach(),
'pitch_loss': pitch_loss.clone().detach(),
'attn_loss': attn_loss.clone().detach(),
'dur_error': (torch.abs(dur_pred - dur_tgt).sum()
/ dur_mask.sum()).detach(),
}
if energy_pred is not None:
meta['energy_loss'] = energy_loss.clone().detach()
assert meta_agg in ('sum', 'mean')
if meta_agg == 'sum':
bsz = mel_out.size(0)
meta = {k: v * bsz for k, v in meta.items()}
return loss, meta
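# -----------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the loss above applies
# one recurring pattern -- element-wise MSE with reduction='none', zeroed
# outside a length mask and averaged over valid positions only. The sketch
# below shows that pattern in isolation. `_lens_to_mask` is a hypothetical
# stand-in assumed to behave like common.utils.mask_from_lens (1.0 where
# t < length, 0.0 elsewhere).
def _lens_to_mask(lens, max_len):
    # (B,) integer lengths -> (B, T) float mask
    positions = torch.arange(max_len, device=lens.device)[None, :]
    return (positions < lens[:, None]).float()
def _masked_mse(pred, tgt, lens):
    mask = _lens_to_mask(lens, tgt.size(1))         # (B, T)
    loss = F.mse_loss(pred, tgt, reduction='none')  # (B, T)
    return (loss * mask).sum() / mask.sum()         # mean over valid positions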
|
PyTorch/Classification/ConvNets | ConvNets | LOC_synset_mapping | ["tench, Tinca tinca",
"goldfish, Carassius auratus",
"great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias",
"tiger shark, Galeocerdo cuvieri",
"hammerhead, hammerhead shark",
"electric ray, crampfish, numbfish, torpedo",
"stingray",
"cock",
"hen",
"ostrich, Struthio camelus",
"brambling, Fringilla montifringilla",
"goldfinch, Carduelis carduelis",
"house finch, linnet, Carpodacus mexicanus",
"junco, snowbird",
"indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"robin, American robin, Turdus migratorius",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel, dipper",
"kite",
"bald eagle, American eagle, Haliaeetus leucocephalus",
"vulture",
"great grey owl, great gray owl, Strix nebulosa",
"European fire salamander, Salamandra salamandra",
"common newt, Triturus vulgaris",
"eft",
"spotted salamander, Ambystoma maculatum",
"axolotl, mud puppy, Ambystoma mexicanum",
"bullfrog, Rana catesbeiana",
"tree frog, tree-frog",
"tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
"loggerhead, loggerhead turtle, Caretta caretta",
"leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea",
"mud turtle",
"terrapin",
"box turtle, box tortoise",
"banded gecko",
"common iguana, iguana, Iguana iguana",
"American chameleon, anole, Anolis carolinensis",
"whiptail, whiptail lizard",
"agama",
"frilled lizard, Chlamydosaurus kingi",
"alligator lizard",
"Gila monster, Heloderma suspectum",
"green lizard, Lacerta viridis",
"African chameleon, Chamaeleo chamaeleon",
"Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis",
"African crocodile, Nile crocodile, Crocodylus niloticus",
"American alligator, Alligator mississipiensis",
"triceratops",
"thunder snake, worm snake, Carphophis amoenus",
"ringneck snake, ring-necked snake, ring snake",
"hognose snake, puff adder, sand viper",
"green snake, grass snake",
"king snake, kingsnake",
"garter snake, grass snake",
"water snake",
"vine snake",
"night snake, Hypsiglena torquata",
"boa constrictor, Constrictor constrictor",
"rock python, rock snake, Python sebae",
"Indian cobra, Naja naja",
"green mamba",
"sea snake",
"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
"diamondback, diamondback rattlesnake, Crotalus adamanteus",
"sidewinder, horned rattlesnake, Crotalus cerastes",
"trilobite",
"harvestman, daddy longlegs, Phalangium opilio",
"scorpion",
"black and gold garden spider, Argiope aurantia",
"barn spider, Araneus cavaticus",
"garden spider, Aranea diademata",
"black widow, Latrodectus mactans",
"tarantula",
"wolf spider, hunting spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse, partridge, Bonasa umbellus",
"prairie chicken, prairie grouse, prairie fowl",
"peacock",
"quail",
"partridge",
"African grey, African gray, Psittacus erithacus",
"macaw",
"sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser, Mergus serrator",
"goose",
"black swan, Cygnus atratus",
"tusker",
"echidna, spiny anteater, anteater",
"platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus",
"wallaby, brush kangaroo",
"koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus",
"wombat",
"jellyfish",
"sea anemone, anemone",
"brain coral",
"flatworm, platyhelminth",
"nematode, nematode worm, roundworm",
"conch",
"snail",
"slug",
"sea slug, nudibranch",
"chiton, coat-of-mail shell, sea cradle, polyplacophore",
"chambered nautilus, pearly nautilus, nautilus",
"Dungeness crab, Cancer magister",
"rock crab, Cancer irroratus",
"fiddler crab",
"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica",
"American lobster, Northern lobster, Maine lobster, Homarus americanus",
"spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish",
"crayfish, crawfish, crawdad, crawdaddy",
"hermit crab",
"isopod",
"white stork, Ciconia ciconia",
"black stork, Ciconia nigra",
"spoonbill",
"flamingo",
"little blue heron, Egretta caerulea",
"American egret, great white heron, Egretta albus",
"bittern",
"crane",
"limpkin, Aramus pictus",
"European gallinule, Porphyrio porphyrio",
"American coot, marsh hen, mud hen, water hen, Fulica americana",
"bustard",
"ruddy turnstone, Arenaria interpres",
"red-backed sandpiper, dunlin, Erolia alpina",
"redshank, Tringa totanus",
"dowitcher",
"oystercatcher, oyster catcher",
"pelican",
"king penguin, Aptenodytes patagonica",
"albatross, mollymawk",
"grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus",
"killer whale, killer, orca, grampus, sea wolf, Orcinus orca",
"dugong, Dugong dugon",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog, Maltese terrier, Maltese",
"Pekinese, Pekingese, Peke",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound, Afghan",
"basset, basset hound",
"beagle",
"bloodhound, sleuthhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound, Walker foxhound",
"English foxhound",
"redbone",
"borzoi, Russian wolfhound",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound, Ibizan Podenco",
"Norwegian elkhound, elkhound",
"otterhound, otter hound",
"Saluki, gazelle hound",
"Scottish deerhound, deerhound",
"Weimaraner",
"Staffordshire bullterrier, Staffordshire bull terrier",
"American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier, Sealyham",
"Airedale, Airedale terrier",
"cairn, cairn terrier",
"Australian terrier",
"Dandie Dinmont, Dandie Dinmont terrier",
"Boston bull, Boston terrier",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier, Scottish terrier, Scottie",
"Tibetan terrier, chrysanthemum dog",
"silky terrier, Sydney silky",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa, Lhasa apso",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla, Hungarian pointer",
"English setter",
"Irish setter, red setter",
"Gordon setter",
"Brittany spaniel",
"clumber, clumber spaniel",
"English springer, English springer spaniel",
"Welsh springer spaniel",
"cocker spaniel, English cocker spaniel, cocker",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog, bobtail",
"Shetland sheepdog, Shetland sheep dog, Shetland",
"collie",
"Border collie",
"Bouvier des Flandres, Bouviers des Flandres",
"Rottweiler",
"German shepherd, German shepherd dog, German police dog, alsatian",
"Doberman, Doberman pinscher",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard, St Bernard",
"Eskimo dog, husky",
"malamute, malemute, Alaskan malamute",
"Siberian husky",
"dalmatian, coach dog, carriage dog",
"affenpinscher, monkey pinscher, monkey dog",
"basenji",
"pug, pug-dog",
"Leonberg",
"Newfoundland, Newfoundland dog",
"Great Pyrenees",
"Samoyed, Samoyede",
"Pomeranian",
"chow, chow chow",
"keeshond",
"Brabancon griffon",
"Pembroke, Pembroke Welsh corgi",
"Cardigan, Cardigan Welsh corgi",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf, grey wolf, gray wolf, Canis lupus",
"white wolf, Arctic wolf, Canis lupus tundrarum",
"red wolf, maned wolf, Canis rufus, Canis niger",
"coyote, prairie wolf, brush wolf, Canis latrans",
"dingo, warrigal, warragal, Canis dingo",
"dhole, Cuon alpinus",
"African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
"hyena, hyaena",
"red fox, Vulpes vulpes",
"kit fox, Vulpes macrotis",
"Arctic fox, white fox, Alopex lagopus",
"grey fox, gray fox, Urocyon cinereoargenteus",
"tabby, tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat, Siamese",
"Egyptian cat",
"cougar, puma, catamount, mountain lion, painter, panther, Felis concolor",
"lynx, catamount",
"leopard, Panthera pardus",
"snow leopard, ounce, Panthera uncia",
"jaguar, panther, Panthera onca, Felis onca",
"lion, king of beasts, Panthera leo",
"tiger, Panthera tigris",
"cheetah, chetah, Acinonyx jubatus",
"brown bear, bruin, Ursus arctos",
"American black bear, black bear, Ursus americanus, Euarctos americanus",
"ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus",
"sloth bear, Melursus ursinus, Ursus ursinus",
"mongoose",
"meerkat, mierkat",
"tiger beetle",
"ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle",
"ground beetle, carabid beetle",
"long-horned beetle, longicorn, longicorn beetle",
"leaf beetle, chrysomelid",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant, emmet, pismire",
"grasshopper, hopper",
"cricket",
"walking stick, walkingstick, stick insect",
"cockroach, roach",
"mantis, mantid",
"cicada, cicala",
"leafhopper",
"lacewing, lacewing fly",
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
"damselfly",
"admiral",
"ringlet, ringlet butterfly",
"monarch, monarch butterfly, milkweed butterfly, Danaus plexippus",
"cabbage butterfly",
"sulphur butterfly, sulfur butterfly",
"lycaenid, lycaenid butterfly",
"starfish, sea star",
"sea urchin",
"sea cucumber, holothurian",
"wood rabbit, cottontail, cottontail rabbit",
"hare",
"Angora, Angora rabbit",
"hamster",
"porcupine, hedgehog",
"fox squirrel, eastern fox squirrel, Sciurus niger",
"marmot",
"beaver",
"guinea pig, Cavia cobaya",
"sorrel",
"zebra",
"hog, pig, grunter, squealer, Sus scrofa",
"wild boar, boar, Sus scrofa",
"warthog",
"hippopotamus, hippo, river horse, Hippopotamus amphibius",
"ox",
"water buffalo, water ox, Asiatic buffalo, Bubalus bubalis",
"bison",
"ram, tup",
"bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis",
"ibex, Capra ibex",
"hartebeest",
"impala, Aepyceros melampus",
"gazelle",
"Arabian camel, dromedary, Camelus dromedarius",
"llama",
"weasel",
"mink",
"polecat, fitch, foulmart, foumart, Mustela putorius",
"black-footed ferret, ferret, Mustela nigripes",
"otter",
"skunk, polecat, wood pussy",
"badger",
"armadillo",
"three-toed sloth, ai, Bradypus tridactylus",
"orangutan, orang, orangutang, Pongo pygmaeus",
"gorilla, Gorilla gorilla",
"chimpanzee, chimp, Pan troglodytes",
"gibbon, Hylobates lar",
"siamang, Hylobates syndactylus, Symphalangus syndactylus",
"guenon, guenon monkey",
"patas, hussar monkey, Erythrocebus patas",
"baboon",
"macaque",
"langur",
"colobus, colobus monkey",
"proboscis monkey, Nasalis larvatus",
"marmoset",
"capuchin, ringtail, Cebus capucinus",
"howler monkey, howler",
"titi, titi monkey",
"spider monkey, Ateles geoffroyi",
"squirrel monkey, Saimiri sciureus",
"Madagascar cat, ring-tailed lemur, Lemur catta",
"indri, indris, Indri indri, Indri brevicaudatus",
"Indian elephant, Elephas maximus",
"African elephant, Loxodonta africana",
"lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens",
"giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca",
"barracouta, snoek",
"eel",
"coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch",
"rock beauty, Holocanthus tricolor",
"anemone fish",
"sturgeon",
"gar, garfish, garpike, billfish, Lepisosteus osseus",
"lionfish",
"puffer, pufferfish, blowfish, globefish",
"abacus",
"abaya",
"academic gown, academic robe, judge's robe",
"accordion, piano accordion, squeeze box",
"acoustic guitar",
"aircraft carrier, carrier, flattop, attack aircraft carrier",
"airliner",
"airship, dirigible",
"altar",
"ambulance",
"amphibian, amphibious vehicle",
"analog clock",
"apiary, bee house",
"apron",
"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin",
"assault rifle, assault gun",
"backpack, back pack, knapsack, packsack, rucksack, haversack",
"bakery, bakeshop, bakehouse",
"balance beam, beam",
"balloon",
"ballpoint, ballpoint pen, ballpen, Biro",
"Band Aid",
"banjo",
"bannister, banister, balustrade, balusters, handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel, cask",
"barrow, garden cart, lawn cart, wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap, swimming cap",
"bath towel",
"bathtub, bathing tub, bath, tub",
"beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon",
"beacon, lighthouse, beacon light, pharos",
"beaker",
"bearskin, busby, shako",
"beer bottle",
"beer glass",
"bell cote, bell cot",
"bib",
"bicycle-built-for-two, tandem bicycle, tandem",
"bikini, two-piece",
"binder, ring-binder",
"binoculars, field glasses, opera glasses",
"birdhouse",
"boathouse",
"bobsled, bobsleigh, bob",
"bolo tie, bolo, bola tie, bola",
"bonnet, poke bonnet",
"bookcase",
"bookshop, bookstore, bookstall",
"bottlecap",
"bow",
"bow tie, bow-tie, bowtie",
"brass, memorial tablet, plaque",
"brassiere, bra, bandeau",
"breakwater, groin, groyne, mole, bulwark, seawall, jetty",
"breastplate, aegis, egis",
"broom",
"bucket, pail",
"buckle",
"bulletproof vest",
"bullet train, bullet",
"butcher shop, meat market",
"cab, hack, taxi, taxicab",
"caldron, cauldron",
"candle, taper, wax light",
"cannon",
"canoe",
"can opener, tin opener",
"cardigan",
"car mirror",
"carousel, carrousel, merry-go-round, roundabout, whirligig",
"carpenter's kit, tool kit",
"carton",
"car wheel",
"cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello, violoncello",
"cellular telephone, cellular phone, cellphone, cell, mobile phone",
"chain",
"chainlink fence",
"chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour",
"chain saw, chainsaw",
"chest",
"chiffonier, commode",
"chime, bell, gong",
"china cabinet, china closet",
"Christmas stocking",
"church, church building",
"cinema, movie theater, movie theatre, movie house, picture palace",
"cleaver, meat cleaver, chopper",
"cliff dwelling",
"cloak",
"clog, geta, patten, sabot",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil, spiral, volute, whorl, helix",
"combination lock",
"computer keyboard, keypad",
"confectionery, confectionary, candy store",
"container ship, containership, container vessel",
"convertible",
"corkscrew, bottle screw",
"cornet, horn, trumpet, trump",
"cowboy boot",
"cowboy hat, ten-gallon hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib, cot",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam, dike, dyke",
"desk",
"desktop computer",
"dial telephone, dial phone",
"diaper, nappy, napkin",
"digital clock",
"digital watch",
"dining table, board",
"dishrag, dishcloth",
"dishwasher, dish washer, dishwashing machine",
"disk brake, disc brake",
"dock, dockage, docking facility",
"dogsled, dog sled, dog sleigh",
"dome",
"doormat, welcome mat",
"drilling platform, offshore rig",
"drum, membranophone, tympan",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan, blower",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa, boa",
"file, file cabinet, filing cabinet",
"fireboat",
"fire engine, fire truck",
"fire screen, fireguard",
"flagpole, flagstaff",
"flute, transverse flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn, horn",
"frying pan, frypan, skillet",
"fur coat",
"garbage truck, dustcart",
"gasmask, respirator, gas helmet",
"gas pump, gasoline pump, petrol pump, island dispenser",
"goblet",
"go-kart",
"golf ball",
"golfcart, golf cart",
"gondola",
"gong, tam-tam",
"gown",
"grand piano, grand",
"greenhouse, nursery, glasshouse",
"grille, radiator grille",
"grocery store, grocery, food market, market",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower, blow dryer, blow drier, hair dryer, hair drier",
"hand-held computer, hand-held microcomputer",
"handkerchief, hankie, hanky, hankey",
"hard disc, hard disk, fixed disk",
"harmonica, mouth organ, harp, mouth harp",
"harp",
"harvester, reaper",
"hatchet",
"holster",
"home theater, home theatre",
"honeycomb",
"hook, claw",
"hoopskirt, crinoline",
"horizontal bar, high bar",
"horse cart, horse-cart",
"hourglass",
"iPod",
"iron, smoothing iron",
"jack-o'-lantern",
"jean, blue jean, denim",
"jeep, landrover",
"jersey, T-shirt, tee shirt",
"jigsaw puzzle",
"jinrikisha, ricksha, rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat, laboratory coat",
"ladle",
"lampshade, lamp shade",
"laptop, laptop computer",
"lawn mower, mower",
"lens cap, lens cover",
"letter opener, paper knife, paperknife",
"library",
"lifeboat",
"lighter, light, igniter, ignitor",
"limousine, limo",
"liner, ocean liner",
"lipstick, lip rouge",
"Loafer",
"lotion",
"loudspeaker, speaker, speaker unit, loudspeaker system, speaker system",
"loupe, jeweler's loupe",
"lumbermill, sawmill",
"magnetic compass",
"mailbag, postbag",
"mailbox, letter box",
"maillot",
"maillot, tank suit",
"manhole cover",
"maraca",
"marimba, xylophone",
"mask",
"matchstick",
"maypole",
"maze, labyrinth",
"measuring cup",
"medicine chest, medicine cabinet",
"megalith, megalithic structure",
"microphone, mike",
"microwave, microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt, mini",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home, manufactured home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter, scooter",
"mountain bike, all-terrain bike, off-roader",
"mountain tent",
"mouse, computer mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook, notebook computer",
"obelisk",
"oboe, hautboy, hautbois",
"ocarina, sweet potato",
"odometer, hodometer, mileometer, milometer",
"oil filter",
"organ, pipe organ",
"oscilloscope, scope, cathode-ray oscilloscope, CRO",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle, boat paddle",
"paddlewheel, paddle wheel",
"padlock",
"paintbrush",
"pajama, pyjama, pj's, jammies",
"palace",
"panpipe, pandean pipe, syrinx",
"paper towel",
"parachute, chute",
"parallel bars, bars",
"park bench",
"parking meter",
"passenger car, coach, carriage",
"patio, terrace",
"pay-phone, pay-station",
"pedestal, plinth, footstall",
"pencil box, pencil case",
"pencil sharpener",
"perfume, essence",
"Petri dish",
"photocopier",
"pick, plectrum, plectron",
"pickelhaube",
"picket fence, paling",
"pickup, pickup truck",
"pier",
"piggy bank, penny bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate, pirate ship",
"pitcher, ewer",
"plane, carpenter's plane, woodworking plane",
"planetarium",
"plastic bag",
"plate rack",
"plow, plough",
"plunger, plumber's helper",
"Polaroid camera, Polaroid Land camera",
"pole",
"police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria",
"poncho",
"pool table, billiard table, snooker table",
"pop bottle, soda bottle",
"pot, flowerpot",
"potter's wheel",
"power drill",
"prayer rug, prayer mat",
"printer",
"prison, prison house",
"projectile, missile",
"projector",
"puck, hockey puck",
"punching bag, punch bag, punching ball, punchball",
"purse",
"quill, quill pen",
"quilt, comforter, comfort, puff",
"racer, race car, racing car",
"racket, racquet",
"radiator",
"radio, wireless",
"radio telescope, radio reflector",
"rain barrel",
"recreational vehicle, RV, R.V.",
"reel",
"reflex camera",
"refrigerator, icebox",
"remote control, remote",
"restaurant, eating house, eating place, eatery",
"revolver, six-gun, six-shooter",
"rifle",
"rocking chair, rocker",
"rotisserie",
"rubber eraser, rubber, pencil eraser",
"rugby ball",
"rule, ruler",
"running shoe",
"safe",
"safety pin",
"saltshaker, salt shaker",
"sandal",
"sarong",
"sax, saxophone",
"scabbard",
"scale, weighing machine",
"school bus",
"schooner",
"scoreboard",
"screen, CRT screen",
"screw",
"screwdriver",
"seat belt, seatbelt",
"sewing machine",
"shield, buckler",
"shoe shop, shoe-shop, shoe store",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule, slipstick",
"sliding door",
"slot, one-armed bandit",
"snorkel",
"snowmobile",
"snowplow, snowplough",
"soap dispenser",
"soccer ball",
"sock",
"solar dish, solar collector, solar furnace",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web, spider's web",
"spindle",
"sports car, sport car",
"spotlight, spot",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch, stop watch",
"stove",
"strainer",
"streetcar, tram, tramcar, trolley, trolley car",
"stretcher",
"studio couch, day bed",
"stupa, tope",
"submarine, pigboat, sub, U-boat",
"suit, suit of clothes",
"sundial",
"sunglass",
"sunglasses, dark glasses, shades",
"sunscreen, sunblock, sun blocker",
"suspension bridge",
"swab, swob, mop",
"sweatshirt",
"swimming trunks, bathing trunks",
"swing",
"switch, electric switch, electrical switch",
"syringe",
"table lamp",
"tank, army tank, armored combat vehicle, armoured combat vehicle",
"tape player",
"teapot",
"teddy, teddy bear",
"television, television system",
"tennis ball",
"thatch, thatched roof",
"theater curtain, theatre curtain",
"thimble",
"thresher, thrasher, threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop, tobacconist shop, tobacconist",
"toilet seat",
"torch",
"totem pole",
"tow truck, tow car, wrecker",
"toyshop",
"tractor",
"trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi",
"tray",
"trench coat",
"tricycle, trike, velocipede",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus, trolley coach, trackless trolley",
"trombone",
"tub, vat",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle, monocycle",
"upright, upright piano",
"vacuum, vacuum cleaner",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin, fiddle",
"volleyball",
"waffle iron",
"wall clock",
"wallet, billfold, notecase, pocketbook",
"wardrobe, closet, press",
"warplane, military plane",
"washbasin, handbasin, washbowl, lavabo, wash-hand basin",
"washer, automatic washer, washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool, woolen, woollen",
"worm fence, snake fence, snake-rail fence, Virginia fence",
"wreck",
"yawl",
"yurt",
"web site, website, internet site, site",
"comic book",
"crossword puzzle, crossword",
"street sign",
"traffic light, traffic signal, stoplight",
"book jacket, dust cover, dust jacket, dust wrapper",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot, hotpot",
"trifle",
"ice cream, icecream",
"ice lolly, lolly, lollipop, popsicle",
"French loaf",
"bagel, beigel",
"pretzel",
"cheeseburger",
"hotdog, hot dog, red hot",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini, courgette",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber, cuke",
"artichoke, globe artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple, ananas",
"banana",
"jackfruit, jak, jack",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce, chocolate syrup",
"dough",
"meat loaf, meatloaf",
"pizza, pizza pie",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff, drop, drop-off",
"coral reef",
"geyser",
"lakeside, lakeshore",
"promontory, headland, head, foreland",
"sandbar, sand bar",
"seashore, coast, seacoast, sea-coast",
"valley, vale",
"volcano",
"ballplayer, baseball player",
"groom, bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
"corn",
"acorn",
"hip, rose hip, rosehip",
"buckeye, horse chestnut, conker",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn, carrion fungus",
"earthstar",
"hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa",
"bolete",
"ear, spike, capitulum",
"toilet tissue, toilet paper, bathroom tissue"]
|
PyTorch/Forecasting/TFT/triton/scripts/docker | docker | build | #!/usr/bin/env bash
# Copyright (c) 2021-2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build -t tft . -f Dockerfile-triton
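# Hedged usage note (assumption, not part of the original script): once built,
# the image would typically be started with GPU access, e.g.:
#   docker run --gpus=all -it --rm tft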
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | target_assigner | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
   provided RegionSimilarityCalculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import shape_utils
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder, bcoder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
    Anchors that are not matched to anything are given the classification
    target specified by unmatched_class_label ([0] when left as None).
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
    if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
    if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
    groundtruth label that is provided by match. Anchors that are not matched
    to anything are given the target unmatched_class_label.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
    Only positive anchors are set to contribute to the regression loss, so this
    method returns the matched groundtruth box's weight for every positive
    anchor and 0 for every negative or ignored anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
    Positive (matched) anchors are associated with the weight of their matched
    groundtruth box and negative (unmatched) anchors are associated with a
    weight of negative_class_weight. When anchors are ignored, weights are set
    to zero. By default the negative weight is 1.0, but it can be adjusted to
    handle class imbalance (which is almost always the case in object
    detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore it's best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder,
negative_class_weight=negative_class_weight)
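# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one way the factory
# above might be exercised in TF1 graph mode. The box coordinates are made-up
# illustrative values and `_example_assign` is a hypothetical helper, not part
# of the public API.
def _example_assign():
  anchors = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32))
  groundtruth = box_list.BoxList(tf.constant(
      [[0.0, 0.0, 0.45, 0.45]], tf.float32))
  assigner = create_target_assigner('FasterRCNN', stage='proposal')
  # With groundtruth_labels left as None, assign() treats this as a binary
  # problem and gives every groundtruth box a positive label of 1.
  return assigner.assign(anchors, groundtruth)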
def batch_assign_targets(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
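# Note (added comment, not in the original file): batch_assign_targets above
# simply loops target_assigner.assign() over the images in a batch and stacks
# the per-image results; passing a single BoxList of anchors (rather than a
# list) shares one anchor grid across every image in the batch.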
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
  The differences between batch_assign_confidences and batch_assign_targets:
- 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
    tensor (high-dimensional) targets. 'batch_assign_confidences' only supports
scalar (agnostic) and vector (multiclass) targets.
  - 'batch_assign_targets' assumes the input class tensors use the binary
    one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
  - 'batch_assign_confidences' assigns the targets in the same way as
    'batch_assign_targets' except that it gives different weights to implicit
    and explicit classes. This allows the user to control how strongly negative
    gradients are pushed for implicit versus explicit examples during training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_confidences_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.to_float(positive_anchors)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background * (1 - tf.to_float(negative_mask)))
cls_weights_without_background = (
(1 - implicit_class_weight) * tf.to_float(explicit_example_mask)
+ implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list)
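# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal numpy
# illustration of the confidence encoding handled above. With
# implicit_class_weight = 0.25, explicit entries (+1 or -1) keep full weight
# 1.0 while implicit negatives (0) are down-weighted to 0.25, mirroring the
# cls_weights_without_background computation in batch_assign_confidences.
def _confidence_weight_sketch():
    import numpy as np
    implicit_class_weight = 0.25
    # one groundtruth box over three classes:
    # explicit positive, implicit negative, explicit negative
    confidences = np.array([[1.0, 0.0, -1.0]], dtype=np.float32)
    explicit_mask = (confidences != 0.0).astype(np.float32)
    weights = (1.0 - implicit_class_weight) * explicit_mask + implicit_class_weight
    return weights  # array([[1.0, 0.25, 1.0]], dtype=float32)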
|
PyTorch/Classification/ConvNets/triton/scripts/docker | docker | triton_inference_server | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=all}
docker run --rm -d \
-p 8000:8000 \
-p 8001:8001 \
-p 8002:8002 \
--runtime=nvidia \
-e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \
-e ORT_TENSORRT_FP16_ENABLE=1 \
-v ${MODEL_REPOSITORY_PATH}:${MODEL_REPOSITORY_PATH} \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
nvcr.io/nvidia/tritonserver:21.02-py3 tritonserver \
--model-store=${MODEL_REPOSITORY_PATH} \
--strict-model-config=false \
--exit-on-error=true \
--model-control-mode=explicit
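# Editor's note (illustrative): since the container is started detached (-d),
# readiness can be checked via Triton's standard HTTP health endpoint on the
# port mapped above, e.g.:
# curl -s -o /dev/null -w "%{http_code}\n" localhost:8000/v2/health/ready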
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2LSTMCellPlugin | taco2LSTMCellPlugin | taco2LSTMCellLayerPlugin | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "taco2LSTMCellLayerPlugin.h"
#include "taco2LSTMCellKernel.h"
#include "taco2Utils.h"
#include <cuda_runtime.h> // cudaError_t
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
using namespace nvinfer1;
namespace nvinfer1
{
namespace plugin
{
using value_type = Taco2LSTMCellLayerPlugin::value_type;
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const PLUGIN_NAME = "Taco2LSTMCell";
constexpr const char* const PLUGIN_VERSION = "0.1.0";
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
std::vector<value_type> toVector(const Weights& weights)
{
if (weights.type != DataType::kFLOAT)
{
throw std::runtime_error(
"Invalid data type for Taco2LSTMCell weights: " + std::to_string(static_cast<int>(weights.type)));
}
const value_type* const valuesBegin = static_cast<const value_type*>(weights.values);
const value_type* const valuesEnd = valuesBegin + weights.count;
return std::vector<value_type>(valuesBegin, valuesEnd);
}
const void* offset(const void* ptr, const size_t offset)
{
return reinterpret_cast<const void*>(static_cast<const uint8_t*>(ptr) + offset);
}
} // namespace
/******************************************************************************
* STATIC METHODS *************************************************************
*****************************************************************************/
const char* Taco2LSTMCellLayerPlugin::getName()
{
return PLUGIN_NAME;
}
const char* Taco2LSTMCellLayerPlugin::getVersion()
{
return PLUGIN_VERSION;
}
Taco2LSTMCellLayerPlugin Taco2LSTMCellLayerPlugin::deserialize(const void* const data, const size_t length)
{
if (length < 5 * sizeof(int32_t))
{
throw std::runtime_error("Invalid serialized size: " + std::to_string(length));
}
const int inputLength = static_cast<const int32_t*>(data)[0];
const int inputLengthFirst = static_cast<const int32_t*>(data)[1];
const int inputLengthSecond = static_cast<const int32_t*>(data)[2];
const int numDimension = static_cast<const int32_t*>(data)[3];
const bool useFP16 = static_cast<const int32_t*>(data)[4];
const size_t reqSize = 5 * sizeof(int32_t)
+ sizeof(value_type)
* (4 * inputLength * numDimension + 4 * numDimension * numDimension + 2 * 4 * numDimension);
if (reqSize != length)
{
throw std::runtime_error(
"Invalid serialized size: " + std::to_string(length) + " / " + std::to_string(reqSize));
}
const Weights inputWeights{DataType::kFLOAT, offset(data, sizeof(int32_t) * 5), inputLength * numDimension * 4};
const Weights hiddenWeights{DataType::kFLOAT, offset(inputWeights.values, sizeof(value_type) * inputWeights.count),
numDimension * numDimension * 4};
const Weights inputBias{
DataType::kFLOAT, offset(hiddenWeights.values, sizeof(value_type) * hiddenWeights.count), numDimension * 4};
const Weights hiddenBias{
DataType::kFLOAT, offset(inputBias.values, sizeof(value_type) * inputBias.count), numDimension * 4};
Taco2LSTMCellLayerPlugin layer(
inputWeights, hiddenWeights, inputBias, hiddenBias, inputLength, numDimension, useFP16);
layer.mInputLengthFirst = inputLengthFirst;
layer.mInputLengthSecond = inputLengthSecond;
return layer;
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Taco2LSTMCellLayerPlugin::Taco2LSTMCellLayerPlugin(const Weights& inputWeights, const Weights& hiddenWeights,
const Weights& inputBias, const Weights& hiddenBias, const int inputLength, const int numDimension,
const bool useFP16)
: mInputLength(inputLength)
, mInputLengthFirst(0)
, mInputLengthSecond(0)
, mNumDimension(numDimension)
, mInputWeightsHost(toVector(inputWeights))
, mHiddenWeightsHost(toVector(hiddenWeights))
, mInputBiasHost(toVector(inputBias))
, mHiddenBiasHost(toVector(hiddenBias))
, mNamespace()
, mCell(nullptr)
, mUseFP16(useFP16)
{
// validate the configuration
if (mInputLength <= 0)
{
throw std::runtime_error("Invalid Taco2LSTMCell length: " + std::to_string(mInputLength));
}
if (mNumDimension <= 0)
{
throw std::runtime_error("Invalid Taco2LSTMCell dimension: " + std::to_string(mNumDimension));
}
const size_t expectedInputWeights = mInputLength * mNumDimension * 4U;
const size_t expectedHiddenWeights = mNumDimension * mNumDimension * 4U;
const size_t expectedBias = mNumDimension * 4U;
if (mInputWeightsHost.size() != expectedInputWeights)
{
throw std::runtime_error("Taco2LSTMCell expected " + std::to_string(expectedInputWeights)
+ " input weights but given " + std::to_string(mInputWeightsHost.size()));
}
if (mHiddenWeightsHost.size() != expectedHiddenWeights)
{
throw std::runtime_error("Taco2LSTMCell expected " + std::to_string(expectedHiddenWeights)
+ " hidden weights but given " + std::to_string(mHiddenWeightsHost.size()));
}
if (mInputBiasHost.size() != expectedBias)
{
throw std::runtime_error("Taco2LSTMCell expected " + std::to_string(expectedBias) + " input bias but given "
+ std::to_string(mInputBiasHost.size()));
}
if (mHiddenBiasHost.size() != expectedBias)
{
throw std::runtime_error("Taco2LSTMCell expected " + std::to_string(expectedBias) + " hidden bias but given "
+ std::to_string(mHiddenBiasHost.size()));
}
}
Taco2LSTMCellLayerPlugin::Taco2LSTMCellLayerPlugin(Taco2LSTMCellLayerPlugin&& other)
: mInputLength(other.mInputLength)
, mInputLengthFirst(other.mInputLengthFirst)
, mInputLengthSecond(other.mInputLengthSecond)
, mNumDimension(other.mNumDimension)
, mInputWeightsHost(std::move(other.mInputWeightsHost))
, mHiddenWeightsHost(std::move(other.mHiddenWeightsHost))
, mInputBiasHost(std::move(other.mInputBiasHost))
, mHiddenBiasHost(std::move(other.mHiddenBiasHost))
, mNamespace(std::move(other.mNamespace))
, mCell(std::move(other.mCell))
, mUseFP16(other.mUseFP16)
{
other.mInputLength = 0;
other.mInputLengthFirst = 0;
other.mInputLengthSecond = 0;
other.mNumDimension = 0;
other.mUseFP16 = false;
}
Taco2LSTMCellLayerPlugin::~Taco2LSTMCellLayerPlugin()
{
destroy();
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
Taco2LSTMCellLayerPlugin& Taco2LSTMCellLayerPlugin::operator=(Taco2LSTMCellLayerPlugin&& other)
{
// defer to the move constructor
*this = Taco2LSTMCellLayerPlugin(std::move(other));
return *this;
}
DataType Taco2LSTMCellLayerPlugin::getOutputDataType(
const int /* index */, const DataType* const /* inputTypes */, const int /* nbInputs */) const
{
return DataType::kFLOAT;
}
const char* Taco2LSTMCellLayerPlugin::getPluginType() const
{
return getName();
}
const char* Taco2LSTMCellLayerPlugin::getPluginVersion() const
{
return getVersion();
}
int Taco2LSTMCellLayerPlugin::getNbOutputs() const
{
return 2;
}
DimsExprs Taco2LSTMCellLayerPlugin::getOutputDimensions(
const int outputIndex, const DimsExprs* inputs, const int nbInputs, IExprBuilder& exprBuilder)
{
if (nbInputs != NUM_INPUTS)
{
throw std::runtime_error("Can only handle three input tensors: " + std::to_string(nbInputs));
}
if (outputIndex == 0) {
// hidden
return DimsExprs{3, {inputs[INPUT_FIRST_INDEX].d[0], exprBuilder.constant(1), exprBuilder.constant(mNumDimension)}};
} else if (outputIndex == 1) {
// cell
return DimsExprs{3, {inputs[INPUT_FIRST_INDEX].d[0], exprBuilder.constant(1), exprBuilder.constant(mNumDimension)}};
} else {
throw std::runtime_error("Invalid output index: " + std::to_string(outputIndex));
}
}
bool Taco2LSTMCellLayerPlugin::supportsFormatCombination(
const int pos, const PluginTensorDesc* const inOut, const int /* nbInputs */, const int /* nbOutputs */)
{
return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
}
void Taco2LSTMCellLayerPlugin::configurePlugin(const DynamicPluginTensorDesc* const in, const int nbInputs,
const DynamicPluginTensorDesc* const out, const int nbOutputs)
{
if (nbInputs != NUM_INPUTS)
{
throw std::runtime_error("Only three inputs is implemented: " + std::to_string(nbInputs));
}
for (int i = 0; i < nbInputs; ++i)
{
if (in[i].desc.type != DataType::kFLOAT)
{
throw std::runtime_error("Only FLOAT supported as input " + std::to_string(i) + " : "
+ std::to_string(static_cast<int>(in[i].desc.type)));
}
}
if (nbOutputs != 2)
{
throw std::runtime_error("Only two outputs is implemented: " + std::to_string(nbOutputs));
}
for (int i = 0; i < nbOutputs; ++i)
{
if (out[i].desc.type != DataType::kFLOAT)
{
throw std::runtime_error("Only FLOAT supported as output: " + std::to_string(i) + " : "
+ std::to_string(static_cast<int>(out[i].desc.type)));
}
}
{
const Dims dims = in[INPUT_FIRST_INDEX].desc.dims;
bool dimsFound = false;
for (int d = 1; d < dims.nbDims; ++d)
{
if (dims.d[d] != 1)
{
if (dimsFound)
{
throw std::runtime_error("Invalid first input dimension: " + taco2::Taco2Utils::dimsToString(dims));
}
mInputLengthFirst = dims.d[d];
dimsFound = true;
}
}
if (!dimsFound)
{
throw std::runtime_error("Invalid first input dimension: " + taco2::Taco2Utils::dimsToString(dims));
}
}
{
const Dims dims = in[INPUT_SECOND_INDEX].desc.dims;
bool dimsFound = false;
for (int d = 1; d < dims.nbDims; ++d)
{
if (dims.d[d] != 1)
{
if (dimsFound)
{
throw std::runtime_error(
"Invalid second input dimension: " + taco2::Taco2Utils::dimsToString(dims));
}
mInputLengthSecond = dims.d[d];
dimsFound = true;
}
}
if (!dimsFound)
{
throw std::runtime_error("Invalid second input dimension: " + taco2::Taco2Utils::dimsToString(dims));
}
}
if (mInputLengthFirst + mInputLengthSecond != mInputLength)
{
throw std::runtime_error("Invalid input lenghts: " + std::to_string(mInputLengthFirst) + " "
+ std::to_string(mInputLengthSecond) + " != " + std::to_string(mInputLength));
}
}
int Taco2LSTMCellLayerPlugin::initialize()
{
try
{
mCell.reset(new Taco2LSTMCellKernel(mInputWeightsHost.data(), mHiddenWeightsHost.data(), mInputBiasHost.data(),
mHiddenBiasHost.data(), mInputLength, mNumDimension, mUseFP16));
}
catch (const std::exception& e)
{
std::cerr << "Taco2LSTMCellLayerPlugin initialization failed: " << e.what() << std::endl;
return 1;
}
return 0;
}
void Taco2LSTMCellLayerPlugin::terminate()
{
mCell.reset();
}
size_t Taco2LSTMCellLayerPlugin::getWorkspaceSize(const PluginTensorDesc* const /* in */, const int /* nbInputs */,
const PluginTensorDesc* const /* out */, const int /* nbOutputs */) const
{
return 0;
}
int Taco2LSTMCellLayerPlugin::enqueue(const PluginTensorDesc* const inputDesc, const PluginTensorDesc* const /* outputDesc */,
const void* const* const inputs, void* const* const outputs, void* const /*workspace*/, cudaStream_t stream)
{
const int batchSize = inputDesc[INPUT_FIRST_INDEX].dims.d[0];
if (batchSize != 1)
{
// we only support batch size of 1 right now
std::cerr << "Taco2LSTMCellLayerPlugin plugin does not support batch size other than 1: got " << batchSize
<< std::endl;
std::cerr << "Recompile without plugins to use a larger batch size." << std::endl;
return 1;
}
else if (!mCell)
{
std::cerr << "Taco2LSTMCellLayerPlugin is not initialized properly." << std::endl;
return 1;
}
// name inputs and outputs
const value_type* const inputFirstDevice = static_cast<const value_type*>(inputs[INPUT_FIRST_INDEX]);
const value_type* const inputSecondDevice = static_cast<const value_type*>(inputs[INPUT_SECOND_INDEX]);
const value_type* const inputHiddenDevice = static_cast<const value_type*>(inputs[HIDDEN_INDEX]);
const value_type* const inputCellDevice = static_cast<const value_type*>(inputs[CELL_INDEX]);
value_type* const outputHiddenDevice = static_cast<value_type*>(outputs[0]);
value_type* const outputCellDevice = static_cast<value_type*>(outputs[1]);
// launch a single fused kernel computing the LSTM cell update; the gates use
// Wi*x + Wh*h plus the combined bias (bi + bh)
mCell->execute(inputFirstDevice, inputSecondDevice, inputHiddenDevice, inputCellDevice, outputHiddenDevice,
outputCellDevice, mInputLengthFirst, mInputLengthSecond, stream);
return 0;
}
size_t Taco2LSTMCellLayerPlugin::getSerializationSize() const
{
return 5 * sizeof(int32_t) + numInputWeightBytes() + numHiddenWeightBytes() + 2 * numBiasBytes();
}
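// Serialized layout (must mirror deserialize() above): a five-field int32
// header [inputLength, inputLengthFirst, inputLengthSecond, numDimension,
// useFP16], followed by the float32 input weights, hidden weights, input
// bias, and hidden bias, in that order.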
void Taco2LSTMCellLayerPlugin::serialize(void* const buffer) const
{
static_cast<int32_t*>(buffer)[0] = mInputLength;
static_cast<int32_t*>(buffer)[1] = mInputLengthFirst;
static_cast<int32_t*>(buffer)[2] = mInputLengthSecond;
static_cast<int32_t*>(buffer)[3] = mNumDimension;
static_cast<int32_t*>(buffer)[4] = mUseFP16;
float* const inputWeights = reinterpret_cast<float*>(static_cast<int32_t*>(buffer) + 5);
float* const hiddenWeights = inputWeights + numInputWeights();
float* const inputBias = hiddenWeights + numHiddenWeights();
float* const hiddenBias = inputBias + numBiases();
memcpy(inputWeights, mInputWeightsHost.data(), numInputWeightBytes());
memcpy(hiddenWeights, mHiddenWeightsHost.data(), numHiddenWeightBytes());
memcpy(inputBias, mInputBiasHost.data(), numBiasBytes());
memcpy(hiddenBias, mHiddenBiasHost.data(), numBiasBytes());
}
void Taco2LSTMCellLayerPlugin::destroy()
{
terminate();
}
IPluginV2DynamicExt* Taco2LSTMCellLayerPlugin::clone() const
{
// call the constructor, which copies the data
Taco2LSTMCellLayerPlugin clone(
Weights{DataType::kFLOAT, mInputWeightsHost.data(), static_cast<int64_t>(mInputWeightsHost.size())},
Weights{DataType::kFLOAT, mHiddenWeightsHost.data(), static_cast<int64_t>(mHiddenWeightsHost.size())},
Weights{DataType::kFLOAT, mInputBiasHost.data(), static_cast<int64_t>(mInputBiasHost.size())},
Weights{DataType::kFLOAT, mHiddenBiasHost.data(), static_cast<int64_t>(mHiddenBiasHost.size())}, mInputLength,
mNumDimension, mUseFP16);
clone.mInputLengthFirst = mInputLengthFirst;
clone.mInputLengthSecond = mInputLengthSecond;
if (mCell)
{
// initialize the clone too
clone.initialize();
}
// move it to the heap last to avoid exceptions causing memory leaks
return new Taco2LSTMCellLayerPlugin(std::move(clone));
}
void Taco2LSTMCellLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mNamespace = pluginNamespace;
}
const char* Taco2LSTMCellLayerPlugin::getPluginNamespace() const
{
return mNamespace.c_str();
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
int Taco2LSTMCellLayerPlugin::numInputWeights() const
{
return mNumDimension * mInputLength * 4;
}
int Taco2LSTMCellLayerPlugin::numHiddenWeights() const
{
return mNumDimension * mNumDimension * 4;
}
int Taco2LSTMCellLayerPlugin::numBiases() const
{
return mNumDimension * 4;
}
size_t Taco2LSTMCellLayerPlugin::numInputWeightBytes() const
{
return numInputWeights() * sizeof(value_type);
}
size_t Taco2LSTMCellLayerPlugin::numHiddenWeightBytes() const
{
return numHiddenWeights() * sizeof(value_type);
}
size_t Taco2LSTMCellLayerPlugin::numBiasBytes() const
{
return numBiases() * sizeof(value_type);
}
} // namespace plugin
} // namespace nvinfer1
|
PyTorch/SpeechSynthesis/Tacotron2/filelists | filelists | ljs_audio_text_train_subset_64_filelist | LJSpeech-1.1/wavs/LJ040-0100.wav|she would sometimes take Lee with her, apparently leaving him alone in the car while she transacted her business.
LJSpeech-1.1/wavs/LJ011-0248.wav|Howard, strange to say, making no attempt to detain him; probably because Mullay promised to return a few days later, and to bring more money.
LJSpeech-1.1/wavs/LJ016-0442.wav|made a determined effort to burn himself to death by throwing himself bodily on to the fire in the condemned ward.
LJSpeech-1.1/wavs/LJ026-0036.wav|and then a balance must be struck and the doubtful form placed in the kingdom with which it has, on the whole, most points in common.
LJSpeech-1.1/wavs/LJ042-0176.wav|One offers oppression, the other poverty. Both offer imperialistic injustice, tinted with two brands of slavery, end quote.
LJSpeech-1.1/wavs/LJ003-0323.wav|Drunkenness, if it ever occurred, should be visited with severe punishment;
LJSpeech-1.1/wavs/LJ045-0161.wav|He was upset over the fact that I would not answer him.
LJSpeech-1.1/wavs/LJ028-0187.wav|Cyrus decided that Babylon must be taken.
LJSpeech-1.1/wavs/LJ037-0178.wav|or one used Remington-Peters cartridge case, which may have been in the revolver before the shooting,
LJSpeech-1.1/wavs/LJ010-0164.wav|Oxford, who was only nineteen at the time his offense was committed, had been born at Birmingham,
LJSpeech-1.1/wavs/LJ019-0178.wav|and abandoned because of the expense. As to the entire reconstruction of Newgate, nothing had been done as yet.
LJSpeech-1.1/wavs/LJ050-0117.wav|particularly those arising from organized groups, within their special jurisdiction.
LJSpeech-1.1/wavs/LJ033-0128.wav|that the bag Oswald carried contained the assassination weapon and has concluded that Frazier and Randle are mistaken as to the length of the bag.
LJSpeech-1.1/wavs/LJ007-0179.wav|defeats the ends of justice, and disgraces the profession of a Christian country.
LJSpeech-1.1/wavs/LJ033-0067.wav|She pointed to the blanket which was on the floor very close to where Ruth Paine was standing.
LJSpeech-1.1/wavs/LJ004-0139.wav|"In the morning the stench and heat were so oppressive that he and every one else on waking rushed unclothed into the yard;"
LJSpeech-1.1/wavs/LJ009-0208.wav|erected on the cart, about four feet high at the head, and gradually sloping towards the horse, giving a full view of the body,
LJSpeech-1.1/wavs/LJ012-0144.wav|and passed it on to Solomons by his daughter, a widow named Abrahams.
LJSpeech-1.1/wavs/LJ001-0020.wav|the "lower-case" being in fact invented in the early Middle Ages.
LJSpeech-1.1/wavs/LJ014-0227.wav|One of these was Mobbs, who lived in the Minories,
LJSpeech-1.1/wavs/LJ040-0146.wav|He noted that Lee liked to give the impression that he did not care for other people but preferred to keep to himself,
LJSpeech-1.1/wavs/LJ001-0149.wav|From the time when books first took their present shape till the end of the sixteenth century, or indeed later,
LJSpeech-1.1/wavs/LJ002-0143.wav|The commissioners who presided were, quote, little otherwise than self-elected
LJSpeech-1.1/wavs/LJ014-0217.wav|Dwyer managed to overpower his assailant, and got to his feet; but Cannon butted at him with his head, and again threw him to the ground,
LJSpeech-1.1/wavs/LJ005-0250.wav|The prisoners were crowded together in the jail, contrary to the requirements of the four George the fourth
LJSpeech-1.1/wavs/LJ042-0049.wav|I never believed I would find more material advantages at this stage of development in the Soviet Union than I might of had in the U.S.
LJSpeech-1.1/wavs/LJ014-0198.wav|Marley at his trial was undefended, and the sheriffs offered him counsel; but he declined. The witnesses against him all spoke the truth, he said;
LJSpeech-1.1/wavs/LJ034-0093.wav|Brennan also testified that Lee Harvey Oswald,
LJSpeech-1.1/wavs/LJ016-0237.wav|With Calcraft's method there were undoubtedly many failures, and it was a common custom for him to go below the gallows
LJSpeech-1.1/wavs/LJ015-0156.wav|Down at Weybridge, where he had a country place, his name was long remembered with gratitude by the poor.
LJSpeech-1.1/wavs/LJ018-0047.wav|He adhered to this almost to the very last. His case had been warmly espoused by the Society for the Protection of Germans in this country,
LJSpeech-1.1/wavs/LJ013-0020.wav|he acted in a manner which excited the suspicions of the crew.
LJSpeech-1.1/wavs/LJ002-0041.wav|Two other wards were appropriated to the master's side debtors; they were each twenty-three feet by fourteen and a half,
LJSpeech-1.1/wavs/LJ008-0227.wav|slipshod and slovenly, in crushed bonnet and dirty shawl, the gown fastened by a single hook,
LJSpeech-1.1/wavs/LJ007-0029.wav|The condition of the capitally-convicted prisoners after sentence was still very disgraceful. The side they occupied, still known as the press-yard,
LJSpeech-1.1/wavs/LJ018-0358.wav|Christina Edmunds had resort to strychnia, the same lethal drug that Palmer used;
LJSpeech-1.1/wavs/LJ007-0198.wav|The windows were to be glazed and painted to prevent prisoners from looking out;
LJSpeech-1.1/wavs/LJ043-0032.wav|After about a two-week separation, Marina Oswald returned to her husband.
LJSpeech-1.1/wavs/LJ035-0071.wav|At a given signal, they reenacted the event. Baker's movements were timed with a stopwatch.
LJSpeech-1.1/wavs/LJ009-0092.wav|his legs give way, he utters a faint groan, and sinks on the floor.
LJSpeech-1.1/wavs/LJ019-0310.wav|which had long been admitted as indispensable, and had never as yet been properly obtained.
LJSpeech-1.1/wavs/LJ038-0071.wav|When he entered the homicide and robbery bureau office, he saw two detectives standing there with Sgt. Gerald L. Hill,
LJSpeech-1.1/wavs/LJ014-0291.wav|he showed symptoms of delirium tremens, and admitted that he had been addicted to the excessive use of stimulants.
LJSpeech-1.1/wavs/LJ014-0283.wav|The jury found him guilty of the latter only, with a point of law reserved. This was fully argued before three judges,
LJSpeech-1.1/wavs/LJ021-0096.wav|under the able and energetic leadership of General Johnson.
LJSpeech-1.1/wavs/LJ045-0075.wav|She was, quote, sorry that I had not married him (the Russian boyfriend) instead, that it would have been much easier for me, end quote.
LJSpeech-1.1/wavs/LJ022-0203.wav|For that we can be thankful to the God who watches over America.
LJSpeech-1.1/wavs/LJ029-0073.wav|that the President would arrive and depart from Dallas' Love Field; that a motorcade through the downtown area of Dallas to the luncheon site should be arranged;
LJSpeech-1.1/wavs/LJ040-0187.wav|According to Sokolow, this indicated a, quote, present intellectual functioning in the upper range of bright normal intelligence, end quote.
LJSpeech-1.1/wavs/LJ016-0101.wav|One of the three, shamming ill, remained all day in his ward, where he employed himself unraveling the rope from the sleeping-mats.
LJSpeech-1.1/wavs/LJ015-0086.wav|He kept open house at Kilburn Priory;
LJSpeech-1.1/wavs/LJ028-0427.wav|The enormous amount of debris which buried the palaces and temples and walls of Nebuchadnezzar's city, in places to the depth of a hundred feet,
LJSpeech-1.1/wavs/LJ048-0248.wav|President Kennedy was scheduled to speak across the street from his hotel in Fort Worth at eight:thirty a.m.
LJSpeech-1.1/wavs/LJ021-0095.wav|We are now prepared to move into this second phase, on the basis of our experience in the first phase
LJSpeech-1.1/wavs/LJ030-0081.wav|They were instructed to watch particularly for thrown objects, sudden actions in the crowd, and any movements toward the Presidential car.
LJSpeech-1.1/wavs/LJ032-0176.wav|Moreover, the bus transfer which he obtained as he left the bus was still in the pocket when he was arrested.
LJSpeech-1.1/wavs/LJ044-0129.wav|and often it is advisable for some people to remain in the background, not underground, end quote.
LJSpeech-1.1/wavs/LJ018-0177.wav|But as there was no independent corroboration of the informer's evidence, according to the custom of the British law,
LJSpeech-1.1/wavs/LJ049-0113.wav|This point was ably made in the nineteen oh two debate by Senator George F. Hoar, the sponsor of the Senate bill, quote,
LJSpeech-1.1/wavs/LJ050-0141.wav|As a beginning step to improve liaison with local law enforcement officials, the Secret Service on August twenty-six, nineteen sixty-four,
LJSpeech-1.1/wavs/LJ013-0156.wav|a scion of the ducal house of Bedford, by his confidential valet and personal attendant.
LJSpeech-1.1/wavs/LJ032-0222.wav|Moreover, Shaneyfelt testified that in his opinion the photographs were not composites of two different photographs
LJSpeech-1.1/wavs/LJ004-0052.wav|which Howard had eulogized some forty years before.
LJSpeech-1.1/wavs/LJ006-0017.wav|with those who made the selection of the first inspectors, and the two gentlemen appointed were probably the most fitted in England to be so employed.
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph | graph | rmat | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Set, Tuple
import numpy as np
from syngen.generator.graph.fitter import RMATFitter
from syngen.generator.graph.base_graph_generator import BaseGraphGenerator
from syngen.generator.graph.utils import (
effective_nonsquare_rmat_exact,
generate_gpu_rmat,
generate_gpu_chunked_rmat,
)
class RMATGenerator(BaseGraphGenerator):
""" Graph generator based on RMAT that generate non-partite graphs
Args:
seed (int): Seed to reproduce the results. If None then random seed will be used.
logdir (str): Directory to store the logging results.
fitter (RMATFitter): RMATFitter to be used.
"""
def __init__(
self,
seed: Optional[int] = None,
logdir: str = "./logs",
gpu: bool = True,
fitter: Optional[RMATFitter] = None,
**kwargs,
):
super().__init__(seed, logdir, gpu)
self.fitter = fitter or RMATFitter()
def fit(self, *args, **kwargs):
""" Fits generator on the graph
Args:
"""
self._fit_results = self.fitter.fit(*args, **kwargs)
self.logger.log(f"Fit results: {self._fit_results}")
def _generate_part(
self,
fit_results: Tuple[float, float, float, float],
part_shape: Tuple[int, int],
num_edges: int,
has_self_loop: bool,
is_directed: bool,
noise: float,
batch_size: int,
return_node_ids: bool,
save_path: Optional[str],
):
if self.gpu:
return self._generate_part_gpu(
fit_results=fit_results,
part_shape=part_shape,
num_edges=num_edges,
has_self_loop=has_self_loop,
is_directed=is_directed,
noise=noise,
return_node_ids=return_node_ids,
save_path=save_path,
)
else:
return self._generate_part_cpu(
fit_results=fit_results,
part_shape=part_shape,
num_edges=num_edges,
has_self_loop=has_self_loop,
is_directed=is_directed,
noise=noise,
batch_size=batch_size,
return_node_ids=return_node_ids,
)
def _generate_part_cpu(
self,
fit_results: Tuple[float, float, float, float],
part_shape: Tuple[int, int],
num_edges: int,
has_self_loop: bool,
is_directed: bool,
noise: float,
batch_size: int,
return_node_ids: bool,
):
a, b, c, d = fit_results
theta = np.array([[a, b], [c, d]])
theta /= a + b + c + d
res = effective_nonsquare_rmat_exact(
theta,
num_edges,
part_shape,
noise_scaling=noise,
batch_size=batch_size,
dtype=np.int64,
custom_samplers=None,
generate_back_edges=not is_directed,
remove_selfloops=not has_self_loop,
return_node_ids=return_node_ids,
verbose=self.verbose,
)
if return_node_ids:
return res[0], res[1]
return res[0]
def _generate_part_gpu(
self,
fit_results: Tuple[float, float, float, float],
part_shape: Tuple[int, int],
num_edges: int,
has_self_loop: bool,
is_directed: bool,
noise: float,
return_node_ids: bool,
save_path: Optional[str],
_chunked: bool = True,
):
a, b, c, d = fit_results
theta = np.array([a, b, c, d])
theta /= a + b + c + d
a, b, c, d = theta
r_scale, c_scale = part_shape
if _chunked:
res = generate_gpu_chunked_rmat(
a,
b,
c,
d,
r_scale=r_scale,
c_scale=c_scale,
n_edges=num_edges,
noise=noise,
is_directed=is_directed,
has_self_loop=has_self_loop,
return_node_ids=1 if return_node_ids else 0,
save_path=save_path,
verbose=self.verbose,
)
else:
res = generate_gpu_rmat(
a,
b,
c,
d,
r_scale=r_scale,
c_scale=c_scale,
n_edges=num_edges,
noise=noise,
is_directed=is_directed,
has_self_loop=has_self_loop,
return_node_ids=1 if return_node_ids else 0,
)
if return_node_ids:
return res[0], res[1]
return res
def generate(
self,
num_nodes: int,
num_edges: int,
is_directed: bool,
has_self_loop: bool,
noise: float = 0.5,
batch_size: int = 1_000_000,
return_node_ids: bool = False,
save_path: Optional[str] = None,
*args,
**kwargs,
):
""" Generates graph with approximately `num_nodes` nodes and exactly `num_edges` edges from generator
Args:
num_nodes (int): approximate number of nodes to be generated
num_edges(int): exact number of edges to be generated
is_directed (bool): flag indicating whether the generated graph has to be directed
has_self_loop (bool): flag indicating whether to generate self loops
noise (float): noise for RMAT generation to get better degree distribution
batch_size (int): size of the edge chunk that will be generated in one generation step (cpu parameter)
return_node_ids (bool): flag indicating whether the generator has to return nodes_ids as the second output
save_path (bool): path to store the graph. if specified the method return the number of edges in the graph
Returns:
new_graph (np.array[int, int]): generated graph
"""
assert num_nodes > 0, "Wrong number of nodes"
assert num_edges > 0, "Wrong number of edges"
max_edges = (
num_nodes * num_nodes
if has_self_loop
else num_nodes * (num_nodes - 1)
)
if is_directed:
max_edges = max_edges / 2
assert (
num_edges < max_edges
), "Configuration of nodes and edges cannot form any graph"
assert (
self._fit_results
), "There are no fit results, call fit method first or load the seeding matrix from the file"
log2_nodes = math.ceil(math.log2(num_nodes))
part_shape = (log2_nodes, log2_nodes)
res = self._generate_part(
fit_results=self._fit_results,
part_shape=part_shape,
num_edges=num_edges,
has_self_loop=has_self_loop,
is_directed=is_directed,
noise=noise,
batch_size=batch_size,
return_node_ids=return_node_ids,
save_path=save_path,
)
if return_node_ids:
return res[0], res[1]
return res
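# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only): end-to-end use of the generator. The
# arguments accepted by RMATFitter.fit() depend on the fitter implementation,
# so the fit() call below is a placeholder for fitting on a real edge list.
if __name__ == "__main__":
    generator = RMATGenerator(seed=42, gpu=False)
    generator.fit()  # hypothetical no-arg fit; normally pass a graph here
    edges = generator.generate(
        num_nodes=1 << 10,
        num_edges=10_000,
        is_directed=False,
        has_self_loop=False,
    )
    print(edges.shape)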
|
PyTorch/LanguageModeling/BART/bart/tokenization | tokenization | tokenization_utils_base | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Base classes common to both the slow and the fast tokenization classes:
PreTrainedTokenizerBase (hosts all the user-facing encoding methods),
SpecialTokensMixin (hosts the special tokens logic) and
BatchEncoding (wraps the dictionary of outputs with special methods for the fast tokenizers)
"""
import copy
import json
import logging
import os
import warnings
from collections import OrderedDict, UserDict
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import numpy as np
from tokenizers import AddedToken
from tokenizers import Encoding as EncodingFast
from utils.file_utils import (
add_end_docstrings,
cached_path,
hf_bucket_url,
is_remote_url,
is_tf_available,
is_torch_available,
torch_required,
)
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER
# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]
# Slow tokenizers used to be saved in three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
def _missing_(cls, value):
raise ValueError(
"%r is not a valid %s, please select one of %s"
% (value, cls.__name__, str(list(cls._value2member_map_.keys())))
)
class TruncationStrategy(ExplicitEnum):
"""
Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
ONLY_FIRST = "only_first"
ONLY_SECOND = "only_second"
LONGEST_FIRST = "longest_first"
DO_NOT_TRUNCATE = "do_not_truncate"
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
"""
Possible values for the ``return_tensors`` argument in :meth:`PreTrainedTokenizerBase.__call__`.
Useful for tab-completion in an IDE.
"""
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
class CharSpan(NamedTuple):
"""
Character span in the original string.
Args:
start (:obj:`int`): Index of the first character in the original string.
end (:obj:`int`): Index of the character following the last character in the original string.
"""
start: int
end: int
class TokenSpan(NamedTuple):
"""
Token span in an encoded string (list of tokens).
Args:
start (:obj:`int`): Index of the first token in the span.
end (:obj:`int`): Index of the token following the last token in the span.
"""
start: int
end: int
class BatchEncoding(UserDict):
"""
Holds the output of the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`
and :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.batch_encode` methods (tokens,
attention_masks, etc).
This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
utility methods to map from word/character space to token space.
Args:
data (:obj:`dict`):
Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids',
'attention_mask', etc.).
encoding (:obj:`tokenizers.Encoding` or :obj:`Sequence[tokenizers.Encoding]`, `optional`):
If the tokenizer is a fast tokenizer which outputs additional information like the mapping from word/character
space to token space, the :obj:`tokenizers.Encoding` instance or list of instances (for batches) holds this
information.
tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
initialization.
prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add a batch axis when converting to tensors (see :obj:`tensor_type` above).
"""
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
tensor_type: Union[None, str, TensorType] = None,
prepend_batch_axis: bool = False,
):
super().__init__(data)
if isinstance(encoding, EncodingFast):
encoding = [encoding]
self._encodings = encoding
self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
@property
def is_fast(self) -> bool:
"""
:obj:`bool`: Indicate whether this :class:`~transformers.BatchEncoding` was generated from the result of a
:class:`~transformers.PreTrainedTokenizerFast` or not.
"""
return self._encodings is not None
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
"""
If the key is a string, returns the value of the dict associated to :obj:`key` ('input_ids',
'attention_mask', etc.).
If the key is an integer, get the :obj:`tokenizers.Encoding` for batch item with index :obj:`key`.
"""
if isinstance(item, str):
return self.data[item]
elif self._encodings is not None:
return self._encodings[item]
else:
raise KeyError(
"Indexing with integers (to access backend Encoding for a given batch index) "
"is not available when using Python based tokenizers"
)
def __getattr__(self, item: str):
try:
return self.data[item]
except KeyError:
raise AttributeError
def __getstate__(self):
return {"data": self.data, "encodings": self._encodings}
def __setstate__(self, state):
if "data" in state:
self.data = state["data"]
if "encodings" in state:
self._encodings = state["encodings"]
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
# After this point:
# Extended properties and methods only available for fast (Rust-based) tokenizers
# provided by HuggingFace tokenizers library.
@property
def encodings(self) -> Optional[List[EncodingFast]]:
"""
:obj:`Optional[List[tokenizers.Encoding]]`: The list all encodings from the tokenization process.
Returns :obj:`None` if the input was tokenized through a Python (i.e., not fast) tokenizer.
"""
return self._encodings
def tokens(self, batch_index: int = 0) -> List[str]:
"""
Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion
to integer indices) at a given batch index (only works for the output of a fast tokenizer).
Args:
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
Returns:
:obj:`List[str]`: The list of tokens at that index.
"""
if not self._encodings:
raise ValueError("tokens() is not available when using Python-based tokenizers")
return self._encodings[batch_index].tokens
def words(self, batch_index: int = 0) -> List[Optional[int]]:
"""
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
Args:
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
Returns:
:obj:`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by
the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their corresponding
word (several tokens will be mapped to the same word index if they are parts of that word).
"""
if not self._encodings:
raise ValueError("words() is not available when using Python-based tokenizers")
return self._encodings[batch_index].words
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
"""
Get the index of the word corresponding to (i.e. comprising) an encoded token
in a sequence of the batch.
Can be called as:
- ``self.token_to_word(token_index)`` if batch size is 1
- ``self.token_to_word(batch_index, token_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e., words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_token_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence.
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
:obj:`int`: Index of the word in the input sequence.
"""
if not self._encodings:
raise ValueError("token_to_word() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if token_index < 0:
token_index = self._seq_len + token_index
return self._encodings[batch_index].token_to_word(token_index)
def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
"""
Get the encoded token span corresponding to a word in the sequence of the batch.
Token spans are returned as a :class:`~transformers.tokenization_utils_base.TokenSpan` with:
- **start** -- Index of the first token.
- **end** -- Index of the token following the last token.
Can be called as:
- ``self.word_to_tokens(word_index)`` if batch size is 1
- ``self.word_to_tokens(batch_index, word_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence.
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
:class:`~transformers.tokenization_utils_base.TokenSpan`
Span of tokens in the encoded sequence.
"""
if not self._encodings:
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
if batch_index < 0:
batch_index = self._batch_size + batch_index
if word_index < 0:
word_index = self._seq_len + word_index
return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index)))
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
"""
Get the character span corresponding to an encoded token in a sequence of the batch.
Character spans are returned as a :class:`~transformers.tokenization_utils_base.CharSpan` with:
- **start** -- Index of the first character in the original string associated to the token.
- **end** -- Index of the character following the last character in the original string associated to the
token.
Can be called as:
- ``self.token_to_chars(token_index)`` if batch size is 1
- ``self.token_to_chars(batch_index, token_index)`` if batch size is greater than 1
Args:
batch_or_token_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the token in the sequence.
token_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_token_index`, this can be the index
of the token in the sequence.
Returns:
:class:`~transformers.tokenization_utils_base.CharSpan`:
Span of characters in the original string.
"""
if not self._encodings:
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
if token_index is not None:
batch_index = batch_or_token_index
else:
batch_index = 0
token_index = batch_or_token_index
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
"""
Get the index of the token in the encoded output comprising a character
in the original string for a sequence of the batch.
Can be called as:
- ``self.char_to_token(char_index)`` if batch size is 1
- ``self.char_to_token(batch_index, char_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the character in the sequence.
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_char_index`, this can be the index
of the character in the sequence.
Returns:
:obj:`int`: Index of the token.
"""
if not self._encodings:
raise ValueError("char_to_token() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_token(char_index)
def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
"""
Get the character span in the original string corresponding to given word in a sequence
of the batch.
Character spans are returned as a CharSpan NamedTuple with:
- start: index of the first character in the original string
- end: index of the character following the last character in the original string
Can be called as:
- ``self.word_to_chars(word_index)`` if batch size is 1
- ``self.word_to_chars(batch_index, word_index)`` if batch size is greater than 1
Args:
batch_or_word_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the word in the sequence.
word_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_word_index`, this can be the index
of the word in the sequence.
Returns:
:obj:`CharSpan` or :obj:`List[CharSpan]`:
Span(s) of the associated character or characters in the string.
CharSpan are NamedTuple with:
- start: index of the first character associated to the token in the original string
- end: index of the character following the last character associated to the token in the original string
"""
if not self._encodings:
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
if word_index is not None:
batch_index = batch_or_word_index
else:
batch_index = 0
word_index = batch_or_word_index
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index)))
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
"""
Get the word in the original string corresponding to a character in the original string of
a sequence of the batch.
Can be called as:
- ``self.char_to_word(char_index)`` if batch size is 1
- ``self.char_to_word(batch_index, char_index)`` if batch size is greater than 1
This method is particularly suited when the input sequences are provided as
pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
to easily associate encoded tokens with provided tokenized words.
Args:
batch_or_char_index (:obj:`int`):
Index of the sequence in the batch. If the batch only comprises one sequence,
this can be the index of the character in the original string.
char_index (:obj:`int`, `optional`):
If a batch index is provided in `batch_or_char_index`, this can be the index
of the character in the original string.
Returns:
:obj:`int` or :obj:`List[int]`:
Index or indices of the associated encoded token(s).
"""
if not self._encodings:
raise ValueError("char_to_word() is not available when using Python based tokenizers")
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return self._encodings[batch_index].char_to_word(char_index)
def convert_to_tensors(
self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
):
"""
Convert the inner content to tensors.
Args:
tensor_type (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
The type of tensors to use. If :obj:`str`, should be one of the values of the enum
:class:`~transformers.tokenization_utils_base.TensorType`. If :obj:`None`, no modification is done.
prepend_batch_axis (:obj:`int`, `optional`, defaults to :obj:`False`):
Whether or not to add the batch dimension during the conversion.
"""
if tensor_type is None:
return self
# Convert to TensorType
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW and is_tf_available():
as_tensor = tf.constant
elif tensor_type == TensorType.PYTORCH and is_torch_available():
as_tensor = torch.tensor
elif tensor_type == TensorType.NUMPY:
as_tensor = np.asarray
else:
raise ImportError(
"Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
tensor_type
)
)
# Do the tensor conversion in batch
for key, value in self.items():
try:
if prepend_batch_axis:
value = [value]
tensor = as_tensor(value)
# Removing this for now in favor of controlling the shape with `prepend_batch_axis`
# # at-least2d
# if tensor.ndim > 2:
# tensor = tensor.squeeze(0)
# elif tensor.ndim < 2:
# tensor = tensor[None, :]
self[key] = tensor
except: # noqa E722
if key == "overflowing_tokens":
raise ValueError(
"Unable to create tensor returning overflowing tokens of different lengths. "
"Please see if a fast version of this tokenizer is available to have this feature available."
)
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
)
return self
@torch_required
def to(self, device: str) -> "BatchEncoding":
"""
Send all values to device by calling :obj:`v.to(device)` (PyTorch only).
Args:
device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.
Returns:
:class:`~transformers.BatchEncoding`:
The same instance of :class:`~transformers.BatchEncoding` after modification.
"""
self.data = {k: v.to(device) for k, v in self.data.items()}
return self
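# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): typical use of the
# mapping helpers above. `fast_tokenizer` stands for any fast (Rust-backed)
# tokenizer instance; the helpers raise ValueError on Python-based tokenizers.
def _batch_encoding_sketch(fast_tokenizer):
    encoding = fast_tokenizer("Hello world")
    tokens = encoding.tokens()      # sub-word tokens for batch item 0
    word_ids = encoding.words()     # word index per token (None for specials)
    first_token = encoding.char_to_token(0)  # token covering character 0
    return tokens, word_ids, first_token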
class SpecialTokensMixin:
"""
A mixin derived by :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`
to handle specific behaviors related to special tokens. In particular, this class holds the attributes which can be
used to directly access these special tokens in a model-independent manner and allows setting and updating the special
tokens.
Args:
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the beginning of a sentence.
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the end of a sentence.
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing an out-of-vocabulary token.
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token separating two different sentences in the same input (used by BERT for instance).
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation.
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the class of the input (used by BERT for instance).
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT).
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A tuple or a list of additional special tokens.
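Example (a minimal sketch; concrete tokenizer classes forward these arguments through their ``__init__``)::
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', pad_token='<pad>')
assert tokenizer.pad_token == '<pad>'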
"""
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(self, verbose=True, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
self.verbose = verbose
# We directly set the hidden value to allow initialization with special tokens
# which are not yet in the vocabulary. Necessary for serialization/de-serialization
# TODO clean this up at some point (probably by switching to fast tokenizers)
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
assert all(isinstance(t, str) for t in value), "One of the tokens is not a string"
setattr(self, key, value)
elif isinstance(value, (str, AddedToken)):
setattr(self, key, value)
else:
raise TypeError(
"special token {} has to be either str or AddedToken but got: {}".format(key, type(value))
)
def sanitize_special_tokens(self) -> int:
"""
Make sure that all the special tokens attributes of the tokenizer (:obj:`tokenizer.mask_token`,
:obj:`tokenizer.cls_token`, etc.) are in the vocabulary.
Add the missing ones to the vocabulary if needed.
Return:
:obj:`int`: The number of tokens added to the vocabulary during the operation.
"""
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
"""
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
Using :obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens are carefully handled by the tokenizer (they are never split).
- You can easily refer to special tokens using tokenizer class attributes like :obj:`tokenizer.cls_token`. This
makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (for instance
:class:`~transformers.BertTokenizer` :obj:`cls_token` is already registered to be :obj:`'[CLS]'` and XLM's one
is also registered to be :obj:`'</s>'`).
Args:
special_tokens_dict (dictionary `str` to `str` or :obj:`tokenizers.AddedToken`):
Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``,
``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
assigns the index of the ``unk_token`` to them).
Returns:
:obj:`int`: Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
if self.verbose:
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
if key == "additional_special_tokens":
assert isinstance(value, (list, tuple)) and all(
isinstance(t, (str, AddedToken)) for t in value
), f"Tokens {value} for key {key} should all be str or AddedToken instances"
added_tokens += self.add_tokens(value, special_tokens=True)
else:
assert isinstance(
value, (str, AddedToken)
), f"Token {value} for key {key} should be a str or an AddedToken instance"
added_tokens += self.add_tokens([value], special_tokens=True)
return added_tokens
def add_tokens(
self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from the length of the current vocabulary.
Args:
new_tokens (:obj:`str`, :obj:`tokenizers.AddedToken` or a list of `str` or :obj:`tokenizers.AddedToken`):
Tokens are only added if they are not already in the vocabulary. :obj:`tokenizers.AddedToken` wraps a
string token to let you personalize its behavior: whether this token should only match against a single
word, whether this token should strip all potential whitespaces on the left side, whether this token
should strip all potential whitespaces on the right side, etc.
special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Can be used to specify if the token is a special token. This mostly changes the normalization behavior
(special tokens like CLS or [MASK] are usually not lower-cased for instance).
See details for :obj:`tokenizers.AddedToken` in HuggingFace tokenizers library.
Returns:
:obj:`int`: Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
"""
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
@property
def bos_token(self) -> str:
"""
:obj:`str`: Beginning of sentence token. Log an error if used while not having been set.
"""
if self._bos_token is None and self.verbose:
logger.error("Using bos_token, but it is not set yet.")
return None
return str(self._bos_token)
@property
def eos_token(self) -> str:
"""
:obj:`str`: End of sentence token. Log an error if used while not having been set.
"""
if self._eos_token is None and self.verbose:
logger.error("Using eos_token, but it is not set yet.")
return None
return str(self._eos_token)
@property
def unk_token(self) -> str:
"""
:obj:`str`: Unknown token. Log an error if used while not having been set.
"""
if self._unk_token is None and self.verbose:
logger.error("Using unk_token, but it is not set yet.")
return None
return str(self._unk_token)
@property
def sep_token(self) -> str:
"""
:obj:`str`: Separation token, to separate context and query in an input sequence.
Log an error if used while not having been set.
"""
if self._sep_token is None and self.verbose:
logger.error("Using sep_token, but it is not set yet.")
return None
return str(self._sep_token)
@property
def pad_token(self) -> str:
"""
:obj:`str`: Padding token. Log an error if used while not having been set.
"""
if self._pad_token is None and self.verbose:
logger.error("Using pad_token, but it is not set yet.")
return None
return str(self._pad_token)
@property
def cls_token(self) -> str:
"""
:obj:`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along
the full depth of the model. Log an error if used while not having been set.
"""
if self._cls_token is None and self.verbose:
logger.error("Using cls_token, but it is not set yet.")
return None
return str(self._cls_token)
@property
def mask_token(self) -> str:
"""
:obj:`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while
not having been set.
"""
if self._mask_token is None and self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@property
def additional_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having
been set.
"""
if self._additional_special_tokens is None and self.verbose:
logger.error("Using additional_special_tokens, but it is not set yet.")
return None
return [str(tok) for tok in self._additional_special_tokens]
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
@property
def bos_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns :obj:`None` if the token
has not been set.
"""
if self._bos_token is None:
return None
return self.convert_tokens_to_ids(self.bos_token)
@property
def eos_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns :obj:`None` if the token has
not been set.
"""
if self._eos_token is None:
return None
return self.convert_tokens_to_ids(self.eos_token)
@property
def unk_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the unknown token in the vocabulary. Returns :obj:`None` if the token has not been
set.
"""
if self._unk_token is None:
return None
return self.convert_tokens_to_ids(self.unk_token)
@property
def sep_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
sequence. Returns :obj:`None` if the token has not been set.
"""
if self._sep_token is None:
return None
return self.convert_tokens_to_ids(self.sep_token)
@property
def pad_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been
set.
"""
if self._pad_token is None:
return None
return self.convert_tokens_to_ids(self.pad_token)
@property
def pad_token_type_id(self) -> int:
"""
:obj:`int`: Id of the padding token type in the vocabulary.
"""
return self._pad_token_type_id
@property
def cls_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input
sequence leveraging self-attention along the full depth of the model.
Returns :obj:`None` if the token has not been set.
"""
if self._cls_token is None:
return None
return self.convert_tokens_to_ids(self.cls_token)
@property
def mask_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
modeling. Returns :obj:`None` if the token has not been set.
"""
if self._mask_token is None:
return None
return self.convert_tokens_to_ids(self.mask_token)
@property
def additional_special_tokens_ids(self) -> List[int]:
"""
:obj:`List[int]`: Ids of all the additional special tokens in the vocabulary.
Log an error if used while not having been set.
"""
return self.convert_tokens_to_ids(self.additional_special_tokens)
@property
def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
"""
:obj:`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes
(:obj:`cls_token`, :obj:`unk_token`, etc.) to their values (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
Convert potential tokens of :obj:`tokenizers.AddedToken` type to string.
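Example (a minimal sketch; the exact tokens depend on the tokenizer)::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
print(tokenizer.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}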
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = str(attr_value)
return set_attr
@property
def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
"""
:obj:`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary
mapping special token class attributes (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
how special tokens are tokenized.
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self) -> List[str]:
"""
:obj:`List[str]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes.
Convert tokens of :obj:`tokenizers.AddedToken` type to string.
"""
all_toks = [str(s) for s in self.all_special_tokens_extended]
return all_toks
@property
def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
"""
:obj:`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.)
mapped to class attributes.
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
how special tokens are tokenized.
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_ids(self) -> List[int]:
"""
:obj:`List[int]`: List the ids of the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class
attributes.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls truncation. Accepts the following values:
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
max_length (:obj:`int`, `optional`):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (:obj:`int`, `optional`, defaults to 0):
If set to a number along with :obj:`max_length`, the overflowing tokens returned when
:obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
is_pretokenized (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the input is already pre-tokenized (e.g., split into words), in which case the tokenizer
will skip the pre-tokenization step. This is useful for NER or token classification.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
"""
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (:obj:`bool`, `optional`):
Whether to return token type IDs. If left to the default, will return the token type IDs according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`__
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return overflowing token sequences.
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return special tokens mask information.
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return :obj:`(char_start, char_end)` for each token.
This is only available on fast tokenizers inheriting from
:class:`~transformers.PreTrainedTokenizerFast`, if using Python's tokenizer, this method will raise
:obj:`NotImplementedError`.
return_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return the lengths of the encoded inputs.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to print more information and warnings.
**kwargs: Passed to the :obj:`self.tokenize()` method.
Return:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
`What are input IDs? <../glossary.html#input-ids>`__
- **token_type_ids** -- List of token type ids to be fed to a model (when :obj:`return_token_type_ids=True`
or if `"token_type_ids"` is in :obj:`self.model_input_names`).
`What are token type IDs? <../glossary.html#token-type-ids>`__
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
:obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`).
`What are attention masks? <../glossary.html#attention-mask>`__
- **overflowing_tokens** -- List of overflowing tokens sequences (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when :obj:`add_special_tokens=True` and :obj:`return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when :obj:`return_length=True`)
"""
INIT_TOKENIZER_DOCSTRING = r"""
Class attributes (overridden by derived classes)
- **vocab_files_names** (:obj:`Dict[str, str]`) -- A dictionary with, as keys, the ``__init__`` keyword name of
each vocabulary file required by the model, and as associated values, the filename for saving the associated
file (string).
- **pretrained_vocab_files_map** (:obj:`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the
low-level being the :obj:`short-cut-names` of the pretrained models with, as associated values, the
:obj:`url` to the associated pretrained vocabulary file.
- **max_model_input_sizes** (:obj:`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the
:obj:`short-cut-names` of the pretrained models, and as associated values, the maximum length of the sequence
inputs of this model, or :obj:`None` if the model has no maximum input size.
- **pretrained_init_configuration** (:obj:`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
:obj:`short-cut-names` of the pretrained models, and as associated values, a dictionary of specific
arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained model when loading the
tokenizer with the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`
method.
- **model_input_names** (:obj:`List[str]`) -- A list of inputs expected in the forward pass of the model.
- **padding_side** (:obj:`str`) -- The default value for the side on which the model should have padding
applied. Should be :obj:`'right'` or :obj:`'left'`.
Args:
model_max_length (:obj:`int`, `optional`):
The maximum length (in number of tokens) for the inputs to the transformer model.
When the tokenizer is loaded with
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`, this will be set to
the value stored for the associated model in ``max_model_input_sizes`` (see above). If no value is
provided, will default to VERY_LARGE_INTEGER (:obj:`int(1e30)`).
padding_side (:obj:`str`, `optional`):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
model_input_names (:obj:`List[string]`, `optional`):
The list of inputs accepted by the forward pass of the model (like :obj:`"token_type_ids"` or
:obj:`"attention_mask"`). Default value is picked from the class attribute of the same name.
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the beginning of a sentence. Will be associated to ``self.bos_token`` and
``self.bos_token_id``.
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the end of a sentence. Will be associated to ``self.eos_token`` and
``self.eos_token_id``.
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing an out-of-vocabulary token. Will be associated to ``self.unk_token`` and
``self.unk_token_id``.
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token separating two different sentences in the same input (used by BERT for instance). Will be
associated to ``self.sep_token`` and ``self.sep_token_id``.
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation. Will be associated to ``self.pad_token`` and
``self.pad_token_id``.
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing the class of the input (used by BERT for instance). Will be associated to
``self.cls_token`` and ``self.cls_token_id``.
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
BERT). Will be associated to ``self.mask_token`` and ``self.mask_token_id``.
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
tokenization process. Will be associated to ``self.additional_special_tokens`` and
``self.additional_special_tokens_ids``.
"""
PREPARE_SEQ2SEQ_BATCH_DOCSTRING = """
Arguments:
src_texts (:obj:`list`):
List of documents to summarize or source language texts.
tgt_texts (:obj:`list`, `optional`):
List of target language texts or summaries.
max_length (:obj:`int`, `optional`):
Controls the maximum length for encoder inputs (documents to summarize or source language texts)
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
length is required by one of the truncation/padding parameters. If the model has no specific maximum
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
max_target_length (:obj:`int`, `optional`):
Controls the maximum length of decoder inputs (target language texts or summaries)
If left unset or set to :obj:`None`, this will use the max_length value.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`):
Activates and controls padding. Accepts the following values:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`):
Activates and controls truncation. Accepts the following values:
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`False` or :obj:`'do_not_truncate'`: No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
Return:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to the encoder.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
- **decoder_input_ids** -- List of token ids to be fed to the decoder.
- **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder.
This does not include causal mask, which is built by the model.
The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``
will only be returned if :obj:`tgt_texts` is passed. Otherwise, :obj:`input_ids` and :obj:`attention_mask` will be the only keys.
"""
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin):
"""
Base class for :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`.
Handles shared (mostly boilerplate) methods for those two classes.
"""
vocab_files_names: Dict[str, str] = {}
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
max_model_input_sizes: Dict[str, Optional[int]] = {}
model_input_names: List[str] = ["token_type_ids", "attention_mask"]
padding_side: str = "right"
def __init__(self, **kwargs):
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
self.init_inputs = ()
self.init_kwargs = kwargs
# For backward compatibility we fallback to set model_max_length from max_len if provided
model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right",
"left",
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
super().__init__(**kwargs)
@property
def max_len(self) -> int:
"""
:obj:`int`: **Deprecated** Kept here for backward compatibility. Now renamed to :obj:`model_max_length` to
avoid ambiguity.
"""
warnings.warn(
"The `max_len` attribute has been deprecated and will be removed in a future version, use `model_max_length` instead.",
FutureWarning,
)
return self.model_max_length
@property
def max_len_single_sentence(self) -> int:
"""
:obj:`int`: The maximum length of a sentence that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@property
def max_len_sentences_pair(self) -> int:
"""
:obj:`int`: The maximum combined length of a pair of sentences that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_single_sentence'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
# For backward compatibility, allow to try to setup 'max_len_sentences_pair'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
else:
raise ValueError(
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase` (or a derived class) from
a predefined tokenizer.
Args:
pretrained_model_name_or_path (:obj:`str`):
Can be either:
- A string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`
method, e.g., ``./my_model_directory/``.
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
``./my_model_directory/vocab.txt``.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Attempt to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__`` method.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__`` for more details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizerBase` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (
cls.pretrained_init_configuration
and pretrained_model_name_or_path in cls.pretrained_init_configuration
):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy()
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path
)
)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
if len(cls.vocab_files_names) > 1:
raise ValueError(
"Calling {}.from_pretrained() with the path to a single file or url is not supported."
"Use a model identifier or the path to a directory instead.".format(cls.__name__)
)
logger.warning(
"Calling {}.from_pretrained() with the path to a single file or url is deprecated".format(
cls.__name__
)
)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
else:
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
additional_files_names = {
"added_tokens_file": ADDED_TOKENS_FILE,
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
"full_tokenizer_file": FULL_TOKENIZER_FILE,
}
# Look for the tokenizer files
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
else:
full_file_name = hf_bucket_url(
pretrained_model_name_or_path, filename=file_name, use_cdn=False, mirror=None
)
vocab_files[file_id] = full_file_name
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(
file_path,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = (
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path or url to a directory containing vocabulary files "
"named {}, but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
raise EnvironmentError(msg)
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
raise EnvironmentError(
"Model name '{}' was not found in tokenizers model name list ({}). "
"We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files "
"named {} but couldn't find such vocabulary files at this path or url.".format(
pretrained_model_name_or_path,
", ".join(s3_models),
pretrained_model_name_or_path,
list(cls.vocab_files_names.values()),
)
)
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id]))
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
# won't index sequences longer than the number of positional embeddings
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if model_max_length is not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments into init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# If there is a complementary special token map, load it
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [AddedToken(**token) if isinstance(token, dict) else token for token in value]
setattr(tokenizer, key, value)
# Add supplementary tokens.
special_tokens = tokenizer.all_special_tokens
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
# Sort added tokens by index
added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1]))
for token, index in added_tok_encoder_sorted:
assert index == len(tokenizer), (
f"Non-consecutive added token '{token}' found. "
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
)
tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens))
# Check all our special tokens are registered as "no split" tokens (we don't cut them) and are in the vocab
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning(
"Special tokens have been added in the vocabulary, make sure the associated word emebedding are fine-tuned or trained."
)
return tokenizer
def save_pretrained(self, save_directory: str) -> Tuple[str]:
"""
Save the tokenizer vocabulary files together with:
- added tokens,
- special tokens to class attributes mapping,
- tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).
This method makes sure the full tokenizer can then be re-loaded using the
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained` class method.
.. Warning::
This won't save modifications you may have applied to the tokenizer after the instantiation (for instance,
modifying :obj:`tokenizer.do_lower_case` after creation).
Args:
save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved.
Returns:
A tuple of :obj:`str`: The files saved.
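Example (a minimal sketch; any concrete tokenizer class works the same way)::
tokenizer.save_pretrained('./my_tokenizer/')
# reload later with the matching class method
tokenizer = BertTokenizer.from_pretrained('./my_tokenizer/')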
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
write_dict = {}
for key, value in self.special_tokens_map_extended.items():
if isinstance(value, AddedToken):
write_dict[key] = value.__getstate__()
elif isinstance(value, list):
write_dict[key] = [
token.__getstate__() if isinstance(token, AddedToken) else token for token in value
]
else:
write_dict[key] = value
f.write(json.dumps(write_dict, ensure_ascii=False))
added_vocab = self.get_added_vocab()
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, ensure_ascii=False)
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
@add_end_docstrings(
ENCODE_KWARGS_DOCSTRING,
"""
**kwargs: Passed along to the `.tokenize()` method.
""",
"""
Returns:
:obj:`List[int]`, :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`:
The tokenized ids of the text.
""",
)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
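Example (a minimal sketch, assuming an instantiated ``tokenizer``)::
ids = tokenizer.encode("Hello world", add_special_tokens=True)
# roughly equivalent to converting tokenizer.tokenize("Hello world") to ids
# and adding the model's special tokens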
"""
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def num_special_tokens_to_add(self, pair: bool = False) -> int:
raise NotImplementedError
def _get_padding_truncation_strategies(
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
):
"""
Find the correct padding/truncation strategy with backward compatibility
for old arguments (truncation_strategy and pad_to_max_length) and behaviors.
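Example of the resolution (a minimal sketch)::
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding='max_length', truncation=True, max_length=128
)
# padding_strategy == PaddingStrategy.MAX_LENGTH
# truncation_strategy == TruncationStrategy.LONGEST_FIRST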
"""
old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
# Backward compatibility for previous behavior, maybe we should deprecate it:
# If you only set max_length, it activates truncation for max_length
if max_length is not None and padding is False and truncation is False:
if verbose:
logger.warning(
"Truncation was not explicitely activated but `max_length` is provided a specific value, "
"please use `truncation=True` to explicitely truncate examples to max length. "
"Defaulting to 'longest_first' truncation strategy. "
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
"more precisely by providing a specific strategy to `truncation`."
)
truncation = "longest_first"
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
warnings.warn(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
"maximal input size of the model (e.g. 512 for Bert).",
FutureWarning,
)
if max_length is None:
padding_strategy = PaddingStrategy.LONGEST
else:
padding_strategy = PaddingStrategy.MAX_LENGTH
elif padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
warnings.warn(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
"maximal input size of the model (e.g. 512 for Bert). "
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
FutureWarning,
)
truncation_strategy = TruncationStrategy(old_truncation_strategy)
elif truncation is not False:
if truncation is True:
truncation_strategy = (
TruncationStrategy.LONGEST_FIRST
) # Default to truncate the longest sequences in pairs of inputs
elif not isinstance(truncation, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation)
else:
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no padding."
)
padding_strategy = PaddingStrategy.DO_NOT_PAD
else:
max_length = self.model_max_length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no truncation."
)
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
else:
max_length = self.model_max_length
# Test if we have a padding token
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
raise ValueError(
"Asking to pad but the tokenizer does not have a padding token. "
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
)
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and padding_strategy != PaddingStrategy.DO_NOT_PAD
and pad_to_multiple_of is not None
and max_length is not None
and (max_length % pad_to_multiple_of != 0)
):
raise ValueError(
f"Truncation and padding are both activated but "
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
)
return padding_strategy, truncation_strategy, max_length, kwargs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences.
Args:
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded.
Each sequence can be a string or a list of strings (pretokenized string).
If the sequences are provided as list of strings (pretokenized), you must set
:obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences).
text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded.
Each sequence can be a string or a list of strings (pretokenized string).
If the sequences are provided as list of strings (pretokenized), you must set
:obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences).
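Example (a minimal sketch, assuming an instantiated ``tokenizer``)::
# single sequence
encoding = tokenizer("Hello world", return_tensors="pt")
# batch of pretokenized sequences, padded to the longest one
batch = tokenizer([["Hello", "world"], ["Hi"]], is_pretokenized=True, padding=True)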
"""
# Input type checking for clearer error
assert isinstance(text, str) or (
isinstance(text, (list, tuple))
and (
len(text) == 0
or (
isinstance(text[0], str)
or (isinstance(text[0], (list, tuple)) and (len(text[0]) == 0 or isinstance(text[0][0], str)))
)
)
), (
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
assert (
text_pair is None
or isinstance(text_pair, str)
or (
isinstance(text_pair, (list, tuple))
and (
len(text_pair) == 0
or (
isinstance(text_pair[0], str)
or (
isinstance(text_pair[0], (list, tuple))
and (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str))
)
)
)
)
), (
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
is_batched = bool(
(not is_pretokenized and isinstance(text, (list, tuple)))
or (is_pretokenized and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)))
)
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
.. warning::
This method is deprecated, ``__call__`` should be used instead.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for not-fast tokenizers)):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the ``tokenize`` method) or a list of integers (tokenized string ids using the
``convert_tokens_to_ids`` method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
.. warning::
This method is deprecated, ``__call__`` should be used instead.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded.
                This can be a list of string/string-sequences/int-sequences or a list of pairs of
string/string-sequences/int-sequence (see details in ``encode_plus``).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_pretokenized=is_pretokenized,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
        Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence length
in the batch.
        Padding side (left/right) and padding token ids are defined at the tokenizer level
        (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``).
.. note::
If the ``encoded_inputs`` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
result will use the same type unless you provide a different tensor type with ``return_tensors``. In the
            case of PyTorch tensors, however, you will lose the specific device of your tensors.
Args:
encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]` or :obj:`List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input (:class:`~transformers.BatchEncoding` or
:obj:`Dict[str, List[int]]`) or a batch of tokenized inputs (list of
:class:`~transformers.BatchEncoding`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`) so
you can use this method during preprocessing as well as in a PyTorch Dataloader collate function.
Instead of :obj:`List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
see the note above for the return type.
            padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
                  single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
                * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to print information and warnings.
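        Example (illustrative; ``tokenizer`` stands for any instantiated subclass, e.g. when
        used as a DataLoader ``collate_fn``)::
            encodings = [tokenizer(t) for t in ["short text", "a somewhat longer text"]]
            batch = tokenizer.pad(encodings, padding=True, return_tensors="np")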
"""
        # If we have a list of dicts, let's convert it into a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
assert "input_ids" in encoded_inputs, (
"You should supply an encoding or a list of encodings to this method. "
            "An encoding is the output of one of the encoding methods of the tokenizer, i.e. "
"__call__/encode_plus/batch_encode_plus. "
)
if not encoded_inputs["input_ids"]:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = encoded_inputs["input_ids"][0]
if isinstance(first_element, (list, tuple)) and first_element:
first_element = first_element[0]
if not isinstance(first_element, int):
if is_tf_available() and isinstance(first_element, tf.Tensor):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and isinstance(first_element, torch.Tensor):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
def to_py_obj(obj):
if isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and isinstance(obj, tf.Tensor):
return obj.numpy().tolist()
elif is_torch_available() and isinstance(obj, torch.Tensor):
return obj.cpu().tolist()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
        # Convert padding_strategy to PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if encoded_inputs["input_ids"] and not isinstance(encoded_inputs["input_ids"][0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(encoded_inputs["input_ids"])
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionnary have a different batch size than others."
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in encoded_inputs["input_ids"])
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create the token type IDs corresponding to the sequences passed.
`What are token type IDs? <../glossary.html#token-type-ids>`__
        Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
Returns:
:obj:`List[int]`: The token type ids.
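        Example (base implementation)::
            # create_token_type_ids_from_sequences([5, 6], [7, 8, 9]) -> [0, 0, 1, 1, 1]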
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
        This implementation does not add special tokens and this method should be overridden in a subclass.
Args:
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
Returns:
:obj:`List[int]`: The model input with special tokens.
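        Example (base implementation, which adds no special tokens)::
            # build_inputs_with_special_tokens([5, 6], [7, 8]) -> [5, 6, 7, 8]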
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user-defined stride) for overflowing tokens.
Args:
ids (:obj:`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
pair_ids (:obj:`List[int]`, `optional`):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
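        Example (illustrative; ``tokenizer`` stands for any instantiated slow tokenizer subclass)::
            ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
            encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)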
"""
if "return_lengths" in kwargs:
if verbose:
warnings.warn(
"The PreTrainedTokenizerBase.prepare_for_model `return_lengths` parameter is deprecated. "
"Please use `return_length` instead.",
FutureWarning,
)
return_length = kwargs["return_lengths"]
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned encodings
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
        # Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.model_max_length)
)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
num_tokens_to_remove: int = 0,
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
stride: int = 0,
) -> Tuple[List[int], List[int], List[int]]:
"""
        Truncates a sequence pair following the strategy.
Args:
ids (:obj:`List[int]`):
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
pair_ids (:obj:`List[int]`, `optional`):
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the
``tokenize`` and ``convert_tokens_to_ids`` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0):
Number of tokens to remove using the truncation strategy.
            truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`'longest_first'`):
The strategy to follow for truncation. Can be:
* :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
provided. This will truncate token by token, removing a token from the longest sequence in the pair
if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                * :obj:`'do_not_truncate'`: No truncation (i.e., can output batch with
sequence lengths greater than the model maximum admissible input size).
stride (:obj:`int`, `optional`, defaults to 0):
If set to a positive number, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
Returns:
:obj:`Tuple[List[int], List[int], List[int]]`:
The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens.
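        Example (worked through for the default ``'longest_first'`` strategy with ``stride=0``)::
            # ids=[1, 2, 3, 4], pair_ids=[5, 6], num_tokens_to_remove=2
            # -> ids=[1, 2], pair_ids=[5, 6], overflowing_tokens=[4, 3]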
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
if not overflowing_tokens:
window_len = min(len(ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(ids[-window_len:])
ids = ids[:-1]
else:
if not overflowing_tokens:
window_len = min(len(pair_ids), stride + 1)
else:
window_len = 1
overflowing_tokens.extend(pair_ids[-window_len:])
pair_ids = pair_ids[:-1]
elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
if len(ids) > num_tokens_to_remove:
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input"
f"but the first sequence has a length {len(ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_second'."
)
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
if len(pair_ids) > num_tokens_to_remove:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
else:
logger.error(
f"We need to remove {num_tokens_to_remove} to truncate the input"
f"but the second sequence has a length {len(pair_ids)}. "
f"Please select another truncation strategy than {truncation_strategy}, "
f"for instance 'longest_first' or 'only_first'."
)
return (ids, pair_ids, overflowing_tokens)
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
        Pad encoded inputs (on left/right and up to a predefined length or the max length in the batch)
Args:
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                >= 7.0 (Volta).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
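        Example of the ``pad_to_multiple_of`` rounding (illustrative)::
            # a 13-token input with pad_to_multiple_of=8 is padded up to length 16,
            # since ((13 // 8) + 1) * 8 == 16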
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs["input_ids"])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = (
padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length
)
if needs_to_be_padded:
difference = max_length - len(encoded_inputs["input_ids"])
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"])
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
return encoded_inputs
def batch_decode(
self, sequences: List[List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> List[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (:obj:`List[List[int]]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`List[str]`: The list of decoded sentences.
"""
return [
self.decode(
seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces
)
for seq in sequences
]
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
        Converts a sequence of ids into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids (:obj:`List[int]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`str`: The decoded sentence.
"""
raise NotImplementedError
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0 (:obj:`List[int]`):
List of ids of the first sequence.
token_ids_1 (:obj:`List[int]`, `optional`):
List of ids of the second sequence.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
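        Example (illustrative; assumes the ids 101 and 102 are in :obj:`all_special_ids`)::
            # get_special_tokens_mask([101, 7592, 102], already_has_special_tokens=True)
            # -> [1, 0, 1]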
"""
assert already_has_special_tokens and token_ids_1 is None, (
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
"Please use a slow (full python) tokenizer to activate this argument."
"Or set `return_special_token_mask=True` when calling the encoding method "
"to get the special tokens mask in any tokenizer. "
)
all_special_ids = self.all_special_ids # cache the property
special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
return special_tokens_mask
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
"""
        Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
Args:
out_string (:obj:`str`): The text to clean up.
Returns:
:obj:`str`: The cleaned-up string.
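        Example::
            # clean_up_tokenization("Hello , world ! I 'm here") -> "Hello, world! I'm here"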
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer | maintainer | container | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
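# Illustrative sketch (not part of the original file): a minimal concrete subclass
# showing how the `Container` interface is meant to be implemented. `EchoContainer`
# and its behavior are hypothetical stand-ins for a real container backend.
class EchoContainer(Container):
    def start(self):
        # a real implementation would create and start a container here
        self._container = object()
    def stop(self):
        # a real implementation would stop and remove the container here
        self._container = None
    def run(self, command: str) -> Any:
        # a real implementation would execute `command` inside the container
        return f"executed in {self.name}: {command}"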
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ProjectionPlugin | taco2ProjectionPlugin | taco2ProjectionKernel | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_PROJECTIONKERNEL_H
#define TT2I_PROJECTIONKERNEL_H
#include "cudaMemory.h"
#include <vector>
namespace nvinfer1
{
namespace plugin
{
class Taco2ProjectionKernel
{
public:
/**
* @brief Create a new Taco2ProjectionKernel.
*
     * @param fcWeightsHost The weights of the fully connected layer.
     * @param fcBiasHost The bias of the fully connected layer.
     * @param inputLength1 The length of the first input.
     * @param inputLength2 The length of the second input.
     * @param numDimension The number of dimensions of the FC layer.
*/
Taco2ProjectionKernel(const std::vector<float>& fcWeightsHost, const std::vector<float>& fcBiasHost,
int inputLength1, int inputLength2, int numDimension);
/**
* @brief Execute this kernel.
*
* @param input1Device The first input on the device.
* @param input2Device The second input on the device.
     * @param outputDevice The output on the device.
* @param stream The stream to operate on.
*/
void execute(const float* input1Device, const float* input2Device, float* outputDevice, cudaStream_t stream);
private:
int mInput1Length;
int mInput2Length;
int mInputLength;
int mNumDimension;
tts::CudaMemory<float> mWeightsDevice;
tts::CudaMemory<float> mBiasDevice;
};
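// Illustrative usage sketch (hypothetical sizes and tensor names; not taken from the
// original sources):
//
//   std::vector<float> weightsHost(/* flattened FC weights */);
//   std::vector<float> biasHost(/* FC bias, one entry per output dimension */);
//   Taco2ProjectionKernel kernel(weightsHost, biasHost, 1024, 512, 81);
//   kernel.execute(lstmOutputDevice, contextDevice, projectionDevice, stream);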
} // namespace plugin
} // namespace nvinfer1
#endif
|
PyTorch/SpeechSynthesis/FastPitch/filelists | filelists | ljs_audio_text_test | wavs/LJ045-0096.wav|Mrs. De Mohrenschildt thought that Oswald,
wavs/LJ049-0022.wav|The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.
wavs/LJ033-0042.wav|Between the hours of eight and nine p.m. they were occupied with the children in the bedrooms located at the extreme east end of the house.
wavs/LJ016-0117.wav|The prisoner had nothing to deal with but wooden panels, and by dint of cutting and chopping he got both the lower panels out.
wavs/LJ025-0157.wav|Under these circumstances, unnatural as they are, with proper management, the bean will thrust forth its radicle and its plumule;
wavs/LJ042-0219.wav|Oswald demonstrated his thinking in connection with his return to the United States by preparing two sets of identical questions of the type which he might have thought
wavs/LJ032-0164.wav|it is not possible to state with scientific certainty that a particular small group of fibers come from a certain piece of clothing
wavs/LJ046-0092.wav|has confidence in the dedicated Secret Service men who are ready to lay down their lives for him
wavs/LJ050-0118.wav|Since these agencies are already obliged constantly to evaluate the activities of such groups,
wavs/LJ043-0016.wav|Jeanne De Mohrenschildt said, quote,
wavs/LJ021-0078.wav|no economic panacea, which could simply revive over-night the heavy industries and the trades dependent upon them.
wavs/LJ039-0148.wav|Examination of the cartridge cases found on the sixth floor of the Depository Building
wavs/LJ047-0202.wav|testified that the information available to the Federal Government about Oswald before the assassination would, if known to PRS,
wavs/LJ023-0056.wav|It is an easy document to understand when you remember that it was called into being
wavs/LJ021-0025.wav|And in many directions, the intervention of that organized control which we call government
wavs/LJ030-0105.wav|Communications in the motorcade.
wavs/LJ021-0012.wav|with respect to industry and business, but nearly all are agreed that private enterprise in times such as these
wavs/LJ019-0169.wav|and one or two men were allowed to mend clothes and make shoes. The rules made by the Secretary of State were hung up in conspicuous parts of the prison;
wavs/LJ039-0088.wav|It just is an aid in seeing in the fact that you only have the one element, the crosshair,
wavs/LJ016-0192.wav|"I think I could do that sort of job," said Calcraft, on the spur of the moment.
wavs/LJ014-0142.wav|was strewn in front of the dock, and sprinkled it towards the bench with a contemptuous gesture.
wavs/LJ012-0015.wav|Weedon and Lecasser to twelve and six months respectively in Coldbath Fields.
wavs/LJ048-0033.wav|Prior to November twenty-two, nineteen sixty-three
wavs/LJ028-0349.wav|who were each required to send so large a number to Babylon, that in all there were collected no fewer than fifty thousand.
wavs/LJ030-0197.wav|At first Mrs. Connally thought that her husband had been killed,
wavs/LJ017-0133.wav|Palmer speedily found imitators.
wavs/LJ034-0123.wav|Although Brennan testified that the man in the window was standing when he fired the shots, most probably he was either sitting or kneeling.
wavs/LJ003-0282.wav|Many years were to elapse before these objections should be fairly met and universally overcome.
wavs/LJ032-0204.wav|Special Agent Lyndal L. Shaneyfelt, a photography expert with the FBI,
wavs/LJ016-0241.wav|Calcraft served the city of London till eighteen seventy-four, when he was pensioned at the rate of twenty-five shillings per week.
wavs/LJ023-0033.wav|we will not allow ourselves to run around in new circles of futile discussion and debate, always postponing the day of decision.
wavs/LJ009-0286.wav|There has never been much science in the system of carrying out the extreme penalty in this country; the "finisher of the law"
wavs/LJ008-0181.wav|he had his pockets filled with bread and cheese, and it was generally supposed that he had come a long distance to see the fatal show.
wavs/LJ015-0052.wav|to the value of twenty thousand pounds.
wavs/LJ016-0314.wav|Sir George Grey thought there was a growing feeling in favor of executions within the prison precincts.
wavs/LJ047-0056.wav|From August nineteen sixty-two
wavs/LJ010-0027.wav|Nor did the methods by which they were perpetrated greatly vary from those in times past.
wavs/LJ010-0065.wav|At the former the "Provisional Government" was to be established,
wavs/LJ046-0113.wav|The Commission has concluded that at the time of the assassination
wavs/LJ028-0410.wav|There among the ruins they still live in the same kind of houses,
wavs/LJ044-0137.wav|More seriously, the facts of his defection had become known, leaving him open to almost unanswerable attack by those who opposed his views.
wavs/LJ008-0215.wav|One by one the huge uprights of black timber were fitted together,
wavs/LJ030-0084.wav|or when the press of the crowd made it impossible for the escort motorcycles to stay in position on the car's rear flanks.
wavs/LJ020-0092.wav|Have yourself called on biscuit mornings an hour earlier than usual.
wavs/LJ029-0096.wav|On November fourteen, Lawson and Sorrels attended a meeting at Love Field
wavs/LJ015-0308.wav|and others who swore to the meetings of the conspirators and their movements. Saward was found guilty,
wavs/LJ012-0067.wav|But Mrs. Solomons could not resist the temptation to dabble in stolen goods, and she was found shipping watches of the wrong category to New York.
wavs/LJ018-0231.wav|namely, to suppress it and substitute another.
wavs/LJ014-0265.wav|and later he became manager of the newly rebuilt Olympic at Wych Street.
wavs/LJ024-0102.wav|would be the first to exclaim as soon as an amendment was proposed
wavs/LJ007-0233.wav|it consists of several circular perforations, about two inches in diameter,
wavs/LJ013-0213.wav|This seems to have decided Courvoisier,
wavs/LJ032-0045.wav|This price included nineteen dollars, ninety-five cents for the rifle and the scope, and one dollar, fifty cents for postage and handling.
wavs/LJ011-0048.wav|Wherefore let him that thinketh he standeth take heed lest he fall," and was full of the most pointed allusions to the culprit.
wavs/LJ005-0294.wav|It was frequently stated in evidence that the jail of the borough was in so unfit a state for the reception of prisoners,
wavs/LJ016-0007.wav|There were others less successful.
wavs/LJ028-0138.wav|perhaps the tales that travelers told him were exaggerated as travelers' tales are likely to be,
wavs/LJ050-0029.wav|that is reflected in definite and comprehensive operating procedures.
wavs/LJ014-0121.wav|The prisoners were in due course transferred to Newgate, to be put upon their trial at the Central Criminal Court.
wavs/LJ014-0146.wav|They had to handcuff her by force against the most violent resistance, and still she raged and stormed,
wavs/LJ046-0111.wav|The Secret Service has attempted to perform this function through the activities of its Protective Research Section
wavs/LJ012-0257.wav|But the affair still remained a profound mystery. No light was thrown upon it till, towards the end of March,
wavs/LJ002-0260.wav|Yet the public opinion of the whole body seems to have checked dissipation.
wavs/LJ031-0014.wav|the Presidential limousine arrived at the emergency entrance of the Parkland Hospital at about twelve:thirty-five p.m.
wavs/LJ047-0093.wav|Oswald was arrested and jailed by the New Orleans Police Department for disturbing the peace, in connection with a street fight which broke out when he was accosted
wavs/LJ003-0324.wav|gaming of all sorts should be peremptorily forbidden under heavy pains and penalties.
wavs/LJ021-0115.wav|we have reached into the heart of the problem which is to provide such annual earnings for the lowest paid worker as will meet his minimum needs.
wavs/LJ046-0191.wav|it had established periodic regular review of the status of four hundred individuals;
wavs/LJ034-0197.wav|who was one of the first witnesses to alert the police to the Depository as the source of the shots, as has been discussed in chapter three.
wavs/LJ002-0253.wav|were governed by rules which they themselves had framed, and under which subscriptions were levied
wavs/LJ048-0288.wav|might have been more alert in the Dallas motorcade if they had retired promptly in Fort Worth.
wavs/LJ007-0112.wav|Many of the old customs once prevalent in the State Side, so properly condemned and abolished,
wavs/LJ017-0189.wav|who was presently attacked in the same way as the others, but, but, thanks to the prompt administration of remedies, he recovered.
wavs/LJ042-0230.wav|basically, although I hate the USSR and socialist system I still think marxism can work under different circumstances, end quote.
wavs/LJ050-0161.wav|The Secret Service should not and does not plan to develop its own intelligence gathering facilities to duplicate the existing facilities of other Federal agencies.
wavs/LJ003-0011.wav|that not more than one bottle of wine or one quart of beer could be issued at one time. No account was taken of the amount of liquors admitted in one day,
wavs/LJ008-0206.wav|and caused a number of stout additional barriers to be erected in front of the scaffold,
wavs/LJ002-0261.wav|The poorer prisoners were not in abject want, as in other prisons,
wavs/LJ012-0189.wav|Hunt, in consideration of the information he had given, escaped death, and was sentenced to transportation for life.
wavs/LJ019-0317.wav|The former, which consisted principally of the tread-wheel, cranks, capstans, shot-drill,
wavs/LJ011-0041.wav|Visited Mr. Fauntleroy. My application for books for him not having been attended, I had no prayer-book to give him.
wavs/LJ023-0089.wav|That is not only my accusation.
wavs/LJ044-0224.wav|would not agree with that particular wording, end quote.
wavs/LJ013-0104.wav|He found them at length residing at the latter place, one as a landed proprietor, the other as a publican.
wavs/LJ013-0055.wav|The jury did not believe him, and the verdict was for the defendants.
wavs/LJ014-0306.wav|These had been attributed to political action; some thought that the large purchases in foreign grains, effected at losing prices,
wavs/LJ029-0052.wav|To supplement the PRS files, the Secret Service depends largely on local police departments and local offices of other Federal agencies
wavs/LJ028-0459.wav|Its bricks, measuring about thirteen inches square and three inches in thickness, were burned and stamped with the usual short inscription:
wavs/LJ017-0183.wav|Soon afterwards Dixon died, showing all the symptoms already described.
wavs/LJ009-0084.wav|At length the ordinary pauses, and then, in a deep tone, which, though hardly above a whisper, is audible to all, says,
wavs/LJ007-0170.wav|That in this vast metropolis, the center of wealth, civilization, and information;
wavs/LJ016-0277.wav|This is proved by contemporary accounts, especially one graphic and realistic article which appeared in the 'Times,'
wavs/LJ009-0061.wav|He staggers towards the pew, reels into it, stumbles forward, flings himself on the ground, and, by a curious twist of the spine,
wavs/LJ019-0201.wav|to select a sufficiently spacious piece of ground, and erect a prison which from foundations to roofs should be in conformity with the newest ideas.
wavs/LJ030-0063.wav|He had repeated this wish only a few days before, during his visit to Tampa, Florida.
wavs/LJ010-0257.wav|a third miscreant made a similar but far less serious attempt in the month of July following.
wavs/LJ009-0106.wav|The keeper tries to appear unmoved, but his eye wanders anxiously over the combustible assembly.
wavs/LJ008-0121.wav|After the construction and action of the machine had been explained, the doctor asked the governor what kind of men he had commanded at Goree,
wavs/LJ050-0069.wav|the Secret Service had received from the FBI some nine thousand reports on members of the Communist Party.
wavs/LJ006-0202.wav|The news-vendor was also a tobacconist,
wavs/LJ012-0230.wav|Shortly before the day fixed for execution, Bishop made a full confession, the bulk of which bore the impress of truth,
wavs/LJ005-0248.wav|and stated that in his opinion Newgate, as the common jail of Middlesex, was wholly inadequate to the proper confinement of its prisoners.
wavs/LJ037-0053.wav|who had been greatly upset by her experience, was able to view a lineup of four men handcuffed together at the police station.
wavs/LJ045-0177.wav|For the first time
wavs/LJ004-0036.wav|it was hoped that their rulers would hire accommodation in the county prisons, and that the inferior establishments would in course of time disappear.
wavs/LJ026-0054.wav|carbohydrates (starch, cellulose) and fats.
wavs/LJ020-0085.wav|Break apart from one another and pile on a plate, throwing a clean doily or a small napkin over them. Break open at table.
wavs/LJ046-0226.wav|The several military intelligence agencies reported crank mail and similar threats involving the President.
wavs/LJ014-0233.wav|he shot an old soldier who had attempted to detain him. He was convicted and executed.
wavs/LJ033-0152.wav|The portion of the palm which was identified was the heel of the right palm, i.e., the area near the wrist, on the little finger side.
wavs/LJ004-0009.wav|as indefatigable and self-sacrificing, found by personal visitation that the condition of jails throughout the kingdom was,
wavs/LJ017-0134.wav|Within a few weeks occurred the Leeds poisoning case, in which the murderer undoubtedly was inspired by the facts made public at Palmer's trial.
wavs/LJ019-0318.wav|was to be the rule for all convicted prisoners throughout the early stages of their detention;
wavs/LJ020-0093.wav|Rise, wash face and hands, rinse the mouth out and brush back the hair.
wavs/LJ012-0188.wav|Probert was then admitted as a witness, and the case was fully proved against Thurtell, who was hanged in front of Hertford Jail.
wavs/LJ019-0202.wav|The preference given to the Pentonville system destroyed all hopes of a complete reformation of Newgate.
wavs/LJ039-0027.wav|Oswald's revolver
wavs/LJ040-0176.wav|He admitted to fantasies about being powerful and sometimes hurting and killing people, but refused to elaborate on them.
wavs/LJ018-0354.wav|Doubts were long entertained whether Thomas Wainwright,
wavs/LJ031-0185.wav|From the Presidential airplane, the Vice President telephoned Attorney General Robert F. Kennedy,
wavs/LJ006-0137.wav|They were not obliged to attend chapel, and seldom if ever went; "prisoners," said one of them under examination, "did not like the trouble of going to chapel."
wavs/LJ032-0085.wav|The Hidell signature on the notice of classification was in the handwriting of Oswald.
wavs/LJ009-0037.wav|the schoolmaster and the juvenile prisoners being seated round the communion-table, opposite the pulpit.
wavs/LJ006-0021.wav|Later on he had devoted himself to the personal investigation of the prisons of the United States.
wavs/LJ006-0082.wav|and this particular official took excellent care to select as residents for his own ward those most suitable from his own point of view.
wavs/LJ016-0380.wav|with hope to the last. There is always the chance of a flaw in the indictment, of a missing witness, or extenuating circumstances.
wavs/LJ019-0344.wav|monitor, or schoolmaster, nor to be engaged in the service of any officer of the prison.
wavs/LJ019-0161.wav|These disciplinary improvements were, however, only slowly and gradually introduced.
wavs/LJ028-0145.wav|And here I may not omit to tell the use to which the mould dug out of the great moat was turned, nor the manner wherein the wall was wrought.
wavs/LJ018-0349.wav|His disclaimer, distinct and detailed on every point, was intended simply for effect.
wavs/LJ043-0010.wav|Some of the members of that group saw a good deal of the Oswalds through the fall of nineteen sixty-three,
wavs/LJ027-0178.wav|These were undoubtedly perennibranchs. In the Permian and Triassic higher forms appeared, which were certainly caducibranch.
wavs/LJ041-0070.wav|He did not rise above the rank of private first class, even though he had passed a qualifying examination for the rank of corporal.
wavs/LJ008-0266.wav|Thus in the years between May first, eighteen twenty-seven, and thirtieth April, eighteen thirty-one,
wavs/LJ021-0091.wav|In this recent reorganization we have recognized three distinct functions:
wavs/LJ019-0129.wav|which marked the growth of public interest in prison affairs, and which was the germ of the new system
wavs/LJ018-0215.wav|William Roupell was the eldest but illegitimate son of a wealthy man who subsequently married Roupell's mother, and had further legitimate issue.
wavs/LJ015-0194.wav|and behaved so as to justify a belief that he had been a jail-bird all his life.
wavs/LJ016-0137.wav|that numbers of men, "lifers," and others with ten, fourteen, or twenty years to do, can be trusted to work out of doors without bolts and bars
wavs/LJ002-0289.wav|the latter raised eighteen pence among them to pay for a truss of straw for the poor woman to lie on.
wavs/LJ023-0016.wav|In nineteen thirty-three you and I knew that we must never let our economic system get completely out of joint again
wavs/LJ011-0141.wav|There were at the moment in Newgate six convicts sentenced to death for forging wills.
wavs/LJ016-0283.wav|to do them mere justice, there was at least till then a half-drunken ribald gaiety among the crowd that made them all akin."
wavs/LJ035-0082.wav|The only interval was the time necessary to ride in the elevator from the second to the sixth floor and walk back to the southeast corner.
wavs/LJ045-0194.wav|Anyone who was familiar with that area of Dallas would have known that the motorcade would probably pass the Texas School Book Depository to get from Main Street
wavs/LJ009-0124.wav|occupied when they saw it last, but a few hours ago, by their comrades who are now dead;
wavs/LJ030-0162.wav|In the Presidential Limousine
wavs/LJ050-0223.wav|The plan provides for an additional two hundred five agents for the Secret Service. Seventeen of this number are proposed for the Protective Research Section;
wavs/LJ008-0228.wav|their harsh and half-cracked voices full of maudlin, besotted sympathy for those about to die.
wavs/LJ002-0096.wav|The eight courts above enumerated were well supplied with water;
wavs/LJ018-0288.wav|After this the other conspirators traveled to obtain genuine bills and master the system of the leading houses at home and abroad.
wavs/LJ002-0106.wav|in which latterly a copper had been fixed for the cooking of provisions sent in by charitable persons.
wavs/LJ025-0129.wav|On each lobe of the bi-lobed leaf of Venus flytrap are three delicate filaments which stand out at right angles from the surface of the leaf.
wavs/LJ044-0013.wav|Hands Off Cuba, end quote, an application form for, and a membership card in,
wavs/LJ049-0115.wav|of the person who is actually in the exercise of the executive power, or
wavs/LJ019-0145.wav|But reformation was only skin deep. Below the surface many of the old evils still rankled.
wavs/LJ019-0355.wav|came up in all respects to modern requirements.
wavs/LJ019-0289.wav|There was unrestrained association of untried and convicted, juvenile with adult prisoners, vagrants, misdemeanants, felons.
wavs/LJ048-0222.wav|in Fort Worth, there occurred a breach of discipline by some members of the Secret Service who were officially traveling with the President.
wavs/LJ016-0367.wav|Under the new system the whole of the arrangements from first to last fell upon the officers.
wavs/LJ047-0097.wav|Agent Quigley did not know of Oswald's prior FBI record when he interviewed him,
wavs/LJ007-0075.wav|as effectually to rebuke and abash the profane spirit of the more insolent and daring of the criminals.
wavs/LJ047-0022.wav|provided by other agencies.
wavs/LJ007-0085.wav|at Newgate and York Castle as long as five years; "at Ilchester and Morpeth for seven years; at Warwick for eight years,
wavs/LJ047-0075.wav|Hosty had inquired earlier and found no evidence that it was functioning in the Dallas area.
wavs/LJ008-0098.wav|One was the "yeoman of the halter," a Newgate official, the executioner's assistant, whom Mr. J. T. Smith, who was present at the execution,
wavs/LJ017-0102.wav|The second attack was fatal, and ended in Cook's death from tetanus.
wavs/LJ046-0105.wav|Second, the adequacy of other advance preparations for the security of the President, during his visit to Dallas,
wavs/LJ018-0206.wav|He was a tall, slender man, with a long face and iron-gray hair.
wavs/LJ012-0271.wav|Whether it was greed or a quarrel that drove Greenacre to the desperate deed remains obscure.
wavs/LJ005-0086.wav|with such further separation as the justices should deem conducive to good order and discipline.
wavs/LJ042-0097.wav|and considerably better living quarters than those accorded to Soviet citizens of equal age and station.
wavs/LJ047-0126.wav|we would handle it in due course, in accord with the whole context of the investigation. End quote.
wavs/LJ041-0022.wav|Oswald first wrote, quote, Edward Vogel, end quote, an obvious misspelling of Voebel's name,
wavs/LJ015-0025.wav|The bank enjoyed an excellent reputation, it had a good connection, and was supposed to be perfectly sound.
wavs/LJ012-0194.wav|But Burke and Hare had their imitators further south,
wavs/LJ028-0416.wav|(if man may speak so confidently of His great impenetrable counsels), for an eternal Testimony of His great work in the confusion of Man's pride,
wavs/LJ007-0130.wav|are all huddled together without discrimination, oversight, or control."
wavs/LJ015-0005.wav|About this time Davidson and Gordon, the people above-mentioned,
wavs/LJ016-0125.wav|with this, placed against the wall near the chevaux-de-frise, he made an escalade.
wavs/LJ014-0224.wav|As Dwyer survived, Cannon escaped the death sentence, which was commuted to penal servitude for life.
wavs/LJ005-0019.wav|refuted by abundant evidence, and having no foundation whatever in truth.
wavs/LJ042-0221.wav|With either great ambivalence, or cold calculation he prepared completely different answers to the same questions.
wavs/LJ001-0063.wav|which was generally more formally Gothic than the printing of the German workmen,
wavs/LJ030-0006.wav|They took off in the Presidential plane, Air Force One, at eleven a.m., arriving at San Antonio at one:thirty p.m., Eastern Standard Time.
wavs/LJ024-0054.wav|democracy will have failed far beyond the importance to it of any king of precedent concerning the judiciary.
wavs/LJ006-0044.wav|the same callous indifference to the moral well-being of the prisoners, the same want of employment and of all disciplinary control.
wavs/LJ039-0154.wav|four point eight to five point six seconds if the second shot missed,
wavs/LJ050-0090.wav|they seem unduly restrictive in continuing to require some manifestation of animus against a Government official.
wavs/LJ028-0421.wav|it was the beginning of the great collections of Babylonian antiquities in the museums of the Western world.
wavs/LJ033-0205.wav|then I would say the possibility exists, these fibers could have come from this blanket, end quote.
wavs/LJ019-0335.wav|The books and journals he was to keep were minutely specified, and his constant presence in or near the jail was insisted upon.
wavs/LJ013-0045.wav|Wallace's relations warned him against his Liverpool friend,
wavs/LJ037-0002.wav|Chapter four. The Assassin: Part six.
wavs/LJ018-0159.wav|This was all the police wanted to know.
wavs/LJ026-0140.wav|In the plant as in the animal metabolism must consist of anabolic and catabolic processes.
wavs/LJ014-0171.wav|I will briefly describe one or two of the more remarkable murders in the years immediately following, then pass on to another branch of crime.
wavs/LJ037-0007.wav|Three others subsequently identified Oswald from a photograph.
wavs/LJ033-0174.wav|microscopic and UV (ultra violet) characteristics, end quote.
wavs/LJ040-0110.wav|he apparently adjusted well enough there to have had an average, although gradually deteriorating, school record
wavs/LJ039-0192.wav|he had a total of between four point eight and five point six seconds between the two shots which hit
wavs/LJ032-0261.wav|When he appeared before the Commission, Michael Paine lifted the blanket
wavs/LJ040-0097.wav|Lee was brought up in this atmosphere of constant money problems, and I am sure it had quite an effect on him, and also Robert, end quote.
wavs/LJ037-0249.wav|Mrs. Earlene Roberts, the housekeeper at Oswald's roominghouse and the last person known to have seen him before he reached tenth Street and Patton Avenue,
wavs/LJ016-0248.wav|Marwood was proud of his calling, and when questioned as to whether his process was satisfactory, replied that he heard "no complaints."
wavs/LJ004-0083.wav|As Mr. Buxton pointed out, many old acts of parliament designed to protect the prisoner were still in full force.
wavs/LJ014-0029.wav|This was Delarue's watch, fully identified as such, which Hocker told his brother Delarue had given him the morning of the murder.
wavs/LJ021-0110.wav|have been best calculated to promote industrial recovery and a permanent improvement of business and labor conditions.
wavs/LJ003-0107.wav|he slept in the same bed with a highwayman on one side, and a man charged with murder on the other.
wavs/LJ039-0076.wav|Ronald Simmons, chief of the U.S. Army Infantry Weapons Evaluation Branch of the Ballistics Research Laboratory, said, quote,
wavs/LJ016-0347.wav|had undoubtedly a solemn, impressive effect upon those outside.
wavs/LJ001-0072.wav|After the end of the fifteenth century the degradation of printing, especially in Germany and Italy,
wavs/LJ024-0018.wav|Consequently, although there never can be more than fifteen, there may be only fourteen, or thirteen, or twelve.
wavs/LJ032-0180.wav|that the fibers were caught in the crevice of the rifle's butt plate, quote, in the recent past, end quote,
wavs/LJ010-0083.wav|and measures taken to arrest them when their plans were so far developed that no doubt could remain as to their guilt.
wavs/LJ002-0299.wav|and gave the garnish for the common side at that sum, which is five shillings more than Mr. Neild says was extorted on the common side.
wavs/LJ048-0143.wav|the Secret Service did not at the time of the assassination have any established procedure governing its relationships with them.
wavs/LJ012-0054.wav|Solomons, while waiting to appear in court, persuaded the turnkeys to take him to a public-house, where all might "refresh."
wavs/LJ019-0270.wav|Vegetables, especially the potato, that most valuable anti-scorbutic, was too often omitted.
wavs/LJ035-0164.wav|three minutes after the shooting.
wavs/LJ014-0326.wav|Maltby and Co. would issue warrants on them deliverable to the importer, and the goods were then passed to be stored in neighboring warehouses.
wavs/LJ001-0173.wav|The essential point to be remembered is that the ornament, whatever it is, whether picture or pattern-work, should form part of the page,
wavs/LJ050-0056.wav|On December twenty-six, nineteen sixty-three, the FBI circulated additional instructions to all its agents,
wavs/LJ003-0319.wav|provided only that their security was not jeopardized, and dependent upon the enforcement of another new rule,
wavs/LJ006-0040.wav|The fact was that the years as they passed, nearly twenty in all, had worked but little permanent improvement in this detestable prison.
wavs/LJ017-0231.wav|His body was found lying in a pool of blood in a night-dress, stabbed over and over again in the left side.
wavs/LJ017-0226.wav|One half of the mutineers fell upon him unawares with handspikes and capstan-bars.
wavs/LJ004-0239.wav|He had been committed for an offense for which he was acquitted.
wavs/LJ048-0112.wav|The Commission also regards the security arrangements worked out by Lawson and Sorrels at Love Field as entirely adequate.
wavs/LJ039-0125.wav|that Oswald was a good shot, somewhat better than or equal to -- better than the average let us say.
wavs/LJ030-0196.wav|He cried out, quote, Oh, no, no, no. My God, they are going to kill us all, end quote,
wavs/LJ010-0228.wav|He was released from Broadmoor in eighteen seventy-eight, and went abroad.
wavs/LJ045-0228.wav|On the other hand, he could have traveled some distance with the money he did have and he did return to his room where he obtained his revolver.
wavs/LJ028-0168.wav|in the other was the sacred precinct of Jupiter Belus,
wavs/LJ021-0140.wav|and in such an effort we should be able to secure for employers and employees and consumers
wavs/LJ009-0280.wav|Again the wretched creature succeeded in obtaining foothold, but this time on the left side of the drop.
wavs/LJ003-0159.wav|To constitute this the aristocratic quarter, unwarrantable demands were made upon the space properly allotted to the female felons,
wavs/LJ016-0274.wav|and the windows of the opposite houses, which commanded a good view, as usual fetched high prices.
wavs/LJ035-0014.wav|it sounded high and I immediately kind of looked up,
wavs/LJ033-0120.wav|which he believed was where the bag reached when it was laid on the seat with one edge against the door.
wavs/LJ045-0015.wav|which Johnson said he did not receive until after the assassination. The letter said in part, quote,
wavs/LJ003-0299.wav|the latter end of the nineteenth century, several of which still fall far short of our English ideal,
wavs/LJ032-0206.wav|After comparing the rifle in the simulated photograph with the rifle in Exhibit Number one thirty-three A, Shaneyfelt testified, quote,
wavs/LJ028-0494.wav|Between the several sections were wide spaces where foot soldiers and charioteers might fight.
wavs/LJ005-0099.wav|and report at length upon the condition of the prisons of the country.
wavs/LJ015-0144.wav|developed to a colossal extent the frauds he had already practiced as a subordinate.
wavs/LJ019-0221.wav|It was intended as far as possible that, except awaiting trial, no prisoner should find himself relegated to Newgate.
wavs/LJ003-0088.wav|in one, for seven years -- that of a man sentenced to death, for whom great interest had been made, but whom it was not thought right to pardon.
wavs/LJ045-0216.wav|nineteen sixty-three, merely to disarm her and to provide a justification of sorts,
wavs/LJ042-0135.wav|that he was not yet twenty years old when he went to the Soviet Union with such high hopes and not quite twenty-three when he returned bitterly disappointed.
wavs/LJ049-0196.wav|On the other hand, it is urged that all features of the protection of the President and his family should be committed to an elite and independent corps.
wavs/LJ018-0278.wav|This was the well and astutely devised plot of the brothers Bidwell,
wavs/LJ030-0238.wav|and then looked around again and saw more of this movement, and so I proceeded to go to the back seat and get on top of him.
wavs/LJ018-0309.wav|where probably the money still remains.
wavs/LJ041-0199.wav|is shown most clearly by his employment relations after his return from the Soviet Union. Of course, he made his real problems worse to the extent
wavs/LJ007-0076.wav|The lax discipline maintained in Newgate was still further deteriorated by the presence of two other classes of prisoners who ought never to have been inmates of such a jail.
wavs/LJ039-0118.wav|He had high motivation. He had presumably a good to excellent rifle and good ammunition.
wavs/LJ024-0019.wav|And there may be only nine.
wavs/LJ008-0085.wav|The fire had not quite burnt out at twelve, in nearly four hours, that is to say.
wavs/LJ018-0031.wav|This fixed the crime pretty certainly upon Müller, who had already left the country, thus increasing suspicion under which he lay.
wavs/LJ030-0032.wav|Dallas police stood at intervals along the fence and Dallas plain clothes men mixed in the crowd.
wavs/LJ050-0004.wav|General Supervision of the Secret Service
wavs/LJ039-0096.wav|This is a definite advantage to the shooter, the vehicle moving directly away from him and the downgrade of the street, and he being in an elevated position
wavs/LJ041-0195.wav|Oswald's interest in Marxism led some people to avoid him,
wavs/LJ047-0158.wav|After a moment's hesitation, she told me that he worked at the Texas School Book Depository near the downtown area of Dallas.
wavs/LJ050-0162.wav|In planning its data processing techniques,
wavs/LJ001-0051.wav|and paying great attention to the "press work" or actual process of printing,
wavs/LJ028-0136.wav|Of all the ancient descriptions of the famous walls and the city they protected, that of Herodotus is the fullest.
wavs/LJ034-0134.wav|Shortly after the assassination Brennan noticed
wavs/LJ019-0348.wav|Every facility was promised. The sanction of the Secretary of State would not be withheld if plans and estimates were duly submitted,
wavs/LJ010-0219.wav|While one stood over the fire with the papers, another stood with lighted torch to fire the house.
wavs/LJ011-0245.wav|Mr. Mullay called again, taking with him five hundred pounds in cash. Howard discovered this, and his manner was very suspicious;
wavs/LJ030-0035.wav|Organization of the Motorcade
wavs/LJ044-0135.wav|While he had drawn some attention to himself and had actually appeared on two radio programs, he had been attacked by Cuban exiles and arrested,
wavs/LJ045-0090.wav|He was very much interested in autobiographical works of outstanding statesmen of the United States, to whom his wife thought he compared himself.
wavs/LJ026-0034.wav|When any given "protist" has to be classified the case must be decided on its individual merits;
wavs/LJ045-0092.wav|as to the fact that he was an outstanding man, end quote.
wavs/LJ017-0050.wav|Palmer, who was only thirty-one at the time of his trial, was in appearance short and stout, with a round head
wavs/LJ036-0104.wav|Whaley picked Oswald.
wavs/LJ019-0055.wav|High authorities were in favor of continuous separation.
wavs/LJ010-0030.wav|The brutal ferocity of the wild beast once aroused, the same means, the same weapons were employed to do the dreadful deed,
wavs/LJ038-0047.wav|Some of the officers saw Oswald strike McDonald with his fist. Most of them heard a click which they assumed to be a click of the hammer of the revolver.
wavs/LJ009-0074.wav|Let us pass on.
wavs/LJ048-0069.wav|Efforts made by the Bureau since the assassination, on the other hand,
wavs/LJ003-0211.wav|They were never left quite alone for fear of suicide, and for the same reason they were searched for weapons or poisons.
wavs/LJ048-0053.wav|It is the conclusion of the Commission that, even in the absence of Secret Service criteria
wavs/LJ033-0093.wav|Frazier estimated that the bag was two feet long, quote, give and take a few inches, end quote, and about five or six inches wide.
wavs/LJ006-0149.wav|The turnkeys left the prisoners very much to themselves, never entering the wards after locking-up time, at dusk, till unlocking next morning,
wavs/LJ018-0211.wav|The false coin was bought by an agent from an agent, and dealings were carried on secretly at the "Clock House" in Seven Dials.
wavs/LJ008-0054.wav|This contrivance appears to have been copied with improvements from that which had been used in Dublin at a still earlier date,
wavs/LJ040-0052.wav|that his commitment to Marxism was an important factor influencing his conduct during his adult years.
wavs/LJ028-0023.wav|Two weeks pass, and at last you stand on the eastern edge of the plateau
wavs/LJ009-0184.wav|Lord Ferrers' body was brought to Surgeons' Hall after execution in his own carriage and six;
wavs/LJ005-0252.wav|A committee was appointed, under the presidency of the Duke of Richmond
wavs/LJ015-0266.wav|has probably no parallel in the annals of crime. Saward himself is a striking and in some respects an unique figure in criminal history.
wavs/LJ017-0059.wav|even after sentence, and until within a few hours of execution, he was buoyed up with the hope of reprieve.
wavs/LJ024-0034.wav|What do they mean by the words "packing the Court"?
wavs/LJ016-0089.wav|He was engaged in whitewashing and cleaning; the officer who had him in charge left him on the stairs leading to the gallery.
wavs/LJ039-0227.wav|with two hits, within four point eight and five point six seconds.
wavs/LJ001-0096.wav|have now come into general use and are obviously a great improvement on the ordinary "modern style" in use in England, which is in fact the Bodoni type
wavs/LJ018-0129.wav|who threatened to betray the theft. But Brewer, either before or after this, succumbed to temptation,
wavs/LJ010-0157.wav|and that, as he was starving, he had resolved on this desperate deed,
wavs/LJ038-0264.wav|He concluded that, quote, the general rifling characteristics of the rifle are of the same type as those found on the bullet
wavs/LJ031-0165.wav|When security arrangements at the airport were complete, the Secret Service made the necessary arrangements for the Vice President to leave the hospital.
wavs/LJ018-0244.wav|The effect of establishing the forgeries would be to restore to the Roupell family lands for which a price had already been paid
wavs/LJ007-0071.wav|in the face of impediments confessedly discouraging
wavs/LJ028-0340.wav|Such of the Babylonians as witnessed the treachery took refuge in the temple of Jupiter Belus;
wavs/LJ017-0164.wav|with the idea of subjecting her to the irritant poison slowly but surely until the desired effect, death, was achieved.
wavs/LJ048-0197.wav|I then told the officers that their primary duty was traffic and crowd control and that they should be alert for any persons who might attempt to throw anything
wavs/LJ013-0098.wav|Mr. Oxenford having denied that he had made any transfer of stock, the matter was at once put into the hands of the police.
wavs/LJ012-0049.wav|led him to think seriously of trying his fortunes in another land.
wavs/LJ030-0014.wav|quote, that the crowd was about the same as the one which came to see him before but there were one hundred thousand extra people on hand who came to see Mrs. Kennedy.
wavs/LJ014-0186.wav|A milliner's porter,
wavs/LJ015-0027.wav|Yet even so early as the death of the first Sir John Paul,
wavs/LJ047-0049.wav|Marina Oswald, however, recalled that her husband was upset by this interview.
wavs/LJ012-0021.wav|at fourteen he was a pickpocket and a "duffer," or a seller of sham goods.
wavs/LJ003-0140.wav|otherwise he would have been stripped of his clothes. End quote.
wavs/LJ042-0130.wav|Shortly thereafter, less than eighteen months after his defection, about six weeks before he met Marina Prusakova,
wavs/LJ019-0180.wav|His letter to the Corporation, under date fourth June,
wavs/LJ017-0108.wav|He was struck with the appearance of the corpse, which was not emaciated, as after a long disease ending in death;
wavs/LJ006-0268.wav|Women saw men if they merely pretended to be wives; even boys were visited by their sweethearts.
wavs/LJ044-0125.wav|of residence in the U.S.S.R. against any cause which I join, by association,
wavs/LJ015-0231.wav|It was Tester's business, who had access to the railway company's books, to watch for this.
wavs/LJ002-0225.wav|The rentals of rooms and fees went to the warden, whose income was two thousand three hundred seventy-two pounds.
wavs/LJ034-0072.wav|The employees raced the elevators to the first floor. Givens saw Oswald standing at the gate on the fifth floor as the elevator went by.
wavs/LJ045-0033.wav|He began to treat me better. He helped me more -- although he always did help. But he was more attentive, end quote.
wavs/LJ031-0058.wav|to infuse blood and fluids into the circulatory system.
wavs/LJ029-0197.wav|During November the Dallas papers reported frequently on the plans for protecting the President, stressing the thoroughness of the preparations.
wavs/LJ043-0047.wav|Oswald and his family lived for a brief period with his mother at her urging, but Oswald soon decided to move out.
wavs/LJ021-0026.wav|seems necessary to produce the same result of justice and right conduct
wavs/LJ003-0230.wav|The prison allowances were eked out by the broken victuals generously given by several eating-house keepers in the city,
wavs/LJ037-0252.wav|Ted Callaway, who saw the gunman moments after the shooting, testified that Commission Exhibit Number one sixty-two
wavs/LJ031-0008.wav|Meanwhile, Chief Curry ordered the police base station to notify Parkland Hospital that the wounded President was en route.
wavs/LJ030-0021.wav|all one had to do was get a high building someday with a telescopic rifle, and there was nothing anybody could do to defend against such an attempt.
wavs/LJ046-0179.wav|being reviewed regularly.
wavs/LJ025-0118.wav|and that, however diverse may be the fabrics or tissues of which their bodies are composed, all these varied structures result
wavs/LJ028-0278.wav|Zopyrus, when they told him, not thinking that it could be true, went and saw the colt with his own eyes;
wavs/LJ007-0090.wav|Not only did their presence tend greatly to interfere with the discipline of the prison, but their condition was deplorable in the extreme.
wavs/LJ045-0045.wav|that she would be able to leave the Soviet Union. Marina Oswald has denied this.
wavs/LJ028-0289.wav|For he cut off his own nose and ears, and then, clipping his hair close and flogging himself with a scourge,
wavs/LJ009-0276.wav|Calcraft, the moment he had adjusted the cap and rope, ran down the steps, drew the bolt, and disappeared.
wavs/LJ031-0122.wav|treated the gunshot wound in the left thigh.
wavs/LJ016-0205.wav|he received a retaining fee of five pounds, five shillings, with the usual guinea for each job;
wavs/LJ019-0248.wav|leading to an inequality, uncertainty, and inefficiency of punishment productive of the most prejudicial results.
wavs/LJ033-0183.wav|it was not surprising that the replica sack made on December one, nineteen sixty-three,
wavs/LJ037-0001.wav|Report of the President's Commission on the Assassination of President Kennedy. The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy.
wavs/LJ018-0218.wav|In eighteen fifty-five
wavs/LJ001-0102.wav|Here and there a book is printed in France or Germany with some pretension to good taste,
wavs/LJ007-0125.wav|It was diverted from its proper uses, and, as the "place of the greatest comfort," was allotted to persons who should not have been sent to Newgate at all.
wavs/LJ050-0022.wav|A formal and thorough description of the responsibilities of the advance agent is now in preparation by the Service.
wavs/LJ028-0212.wav|On the night of the eleventh day Gobrias killed the son of the King.
wavs/LJ028-0357.wav|yet we may be sure that Babylon was taken by Darius only by use of stratagem. Its walls were impregnable.
wavs/LJ014-0199.wav|there was no case to make out; why waste money on lawyers for the defense? His demeanor was cool and collected throughout;
wavs/LJ016-0077.wav|A man named Lears, under sentence of transportation for an attempt at murder on board ship, got up part of the way,
wavs/LJ009-0194.wav|and that executors or persons having lawful possession of the bodies
wavs/LJ014-0094.wav|Discovery of the murder came in this wise. O'Connor, a punctual and well-conducted official, was at once missed at the London Docks.
wavs/LJ001-0079.wav|Caslon's type is clear and neat, and fairly well designed;
wavs/LJ026-0052.wav|In the nutrition of the animal the most essential and characteristic part of the food supply is derived from vegetable
wavs/LJ013-0005.wav|One of the earliest of the big operators in fraudulent finance was Edward Beaumont Smith,
wavs/LJ033-0072.wav|I then stepped off of it and the officer picked it up in the middle and it bent so.
wavs/LJ036-0067.wav|According to McWatters, the Beckley bus was behind the Marsalis bus, but he did not actually see it.
wavs/LJ025-0098.wav|and it is probable that amyloid substances are universally present in the animal organism, though not in the precise form of starch.
wavs/LJ005-0257.wav|during which time a host of witnesses were examined, and the committee presented three separate reports,
wavs/LJ004-0024.wav|Thus in eighteen thirteen the exaction of jail fees had been forbidden by law,
wavs/LJ049-0154.wav|In eighteen ninety-four,
wavs/LJ039-0059.wav|(three) his experience and practice after leaving the Marine Corps, and (four) the accuracy of the weapon and the quality of the ammunition.
wavs/LJ007-0150.wav|He is allowed intercourse with prostitutes who, in nine cases out of ten, have originally conduced to his ruin;
wavs/LJ015-0001.wav|Chronicles of Newgate, Volume two. By Arthur Griffiths. Section eighteen: Newgate notorieties continued, part three.
wavs/LJ010-0158.wav|feeling, as he said, that he might as well be shot or hanged as remain in such a state.
wavs/LJ010-0281.wav|who had borne the Queen's commission, first as cornet, and then lieutenant, in the tenth Hussars.
wavs/LJ033-0055.wav|and he could disassemble it more rapidly.
wavs/LJ015-0218.wav|A new accomplice was now needed within the company's establishment, and Pierce looked about long before he found the right person.
wavs/LJ027-0006.wav|In all these lines the facts are drawn together by a strong thread of unity.
wavs/LJ016-0049.wav|He had here completed his ascent.
wavs/LJ006-0088.wav|It was not likely that a system which left innocent men -- for the great bulk of new arrivals were still untried
wavs/LJ042-0133.wav|a great change must have occurred in Oswald's thinking to induce him to return to the United States.
wavs/LJ045-0234.wav|While he did become enraged at at least one point in his interrogation,
wavs/LJ046-0033.wav|The adequacy of existing procedures can fairly be assessed only after full consideration of the difficulty of the protective assignment,
wavs/LJ037-0061.wav|and having, quote, somewhat bushy, end quote, hair.
wavs/LJ032-0025.wav|the officers of Klein's discovered that a rifle bearing serial number C two seven six six had been shipped to one A. Hidell,
wavs/LJ047-0197.wav|in view of all the information concerning Oswald in its files, should have alerted the Secret Service to Oswald's presence in Dallas
wavs/LJ018-0130.wav|and stole paper on a much larger scale than Brown.
wavs/LJ005-0265.wav|It was recommended that the dietaries should be submitted and approved like the rules; that convicted prisoners should not receive any food but the jail allowance;
wavs/LJ044-0105.wav|He presented Arnold Johnson, Gus Hall,
wavs/LJ015-0043.wav|This went on for some time, and might never have been discovered had some good stroke of luck provided any of the partners
wavs/LJ030-0125.wav|On several occasions when the Vice President's car was slowed down by the throng, Special Agent Youngblood stepped out to hold the crowd back.
wavs/LJ043-0140.wav|He also studied Dallas bus schedules to prepare for his later use of buses to travel to and from General Walker's house.
wavs/LJ002-0220.wav|In consequence of these disclosures, both Bambridge and Huggin, his predecessor in the office, were committed to Newgate,
wavs/LJ034-0117.wav|At one:twenty-nine p.m. the police radio reported
wavs/LJ018-0276.wav|The first plot was against Mr. Harry Emmanuel, but he escaped, and the attempt was made upon Loudon and Ryder.
wavs/LJ004-0077.wav|nor has he a right to poison or starve his fellow-creatures."
wavs/LJ042-0194.wav|they should not be confused with slowness, indecision or fear. Only the intellectually fearless could even be remotely attracted to our doctrine,
wavs/LJ029-0114.wav|The route chosen from the airport to Main Street was the normal one, except where Harwood Street was selected as the means of access to Main Street
wavs/LJ014-0194.wav|The policemen were now in possession;
wavs/LJ032-0027.wav|According to its microfilm records, Klein's received an order for a rifle on March thirteen, nineteen sixty-three,
wavs/LJ048-0289.wav|However, there is no evidence that these men failed to take any action in Dallas within their power that would have averted the tragedy.
wavs/LJ043-0188.wav|that he was the leader of a fascist organization, and when I said that even though all of that might be true, just the same he had no right to take his life,
wavs/LJ011-0118.wav|In eighteen twenty-nine the gallows claimed two more victims for this offense.
wavs/LJ040-0201.wav|After her interview with Mrs. Oswald,
wavs/LJ033-0056.wav|While the rifle may have already been disassembled when Oswald arrived home on Thursday, he had ample time that evening to disassemble the rifle
wavs/LJ047-0073.wav|Hosty considered the information to be, quote, stale, unquote, by that time, and did not attempt to verify Oswald's reported statement.
wavs/LJ001-0153.wav|only nominally so, however, in many cases, since when he uses a headline he counts that in,
wavs/LJ007-0158.wav|or any kind of moral improvement was impossible; the prisoner's career was inevitably downward, till he struck the lowest depths.
wavs/LJ028-0502.wav|The Ishtar gateway leading to the palace was encased with beautiful blue glazed bricks,
wavs/LJ028-0226.wav|Though Herodotus wrote nearly a hundred years after Babylon fell, his story seems to bear the stamp of truth.
wavs/LJ010-0038.wav|as there had been before; as in the year eighteen forty-nine, a year memorable for the Rush murders at Norwich,
wavs/LJ019-0241.wav|But in the interval very comprehensive and, I think it must be admitted, salutary changes were successively introduced into the management of prisons.
wavs/LJ001-0094.wav|were induced to cut punches for a series of "old style" letters.
wavs/LJ001-0015.wav|the forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves.
wavs/LJ047-0015.wav|From defection to return to Fort Worth.
wavs/LJ044-0139.wav|since there was no background to the New Orleans FPCC, quote, organization, end quote, which consisted solely of Oswald.
wavs/LJ050-0031.wav|that the Secret Service consciously set about the task of inculcating and maintaining the highest standard of excellence and esprit, for all of its personnel.
wavs/LJ050-0235.wav|It has also used other Federal law enforcement agents during Presidential visits to cities in which such agents are stationed.
wavs/LJ050-0137.wav|FBI, and the Secret Service.
wavs/LJ031-0109.wav|At one:thirty-five p.m., after Governor Connally had been moved to the operating room, Dr. Shaw started the first operation
wavs/LJ031-0041.wav|He noted that the President was blue-white or ashen in color; had slow, spasmodic, agonal respiration without any coordination;
wavs/LJ021-0139.wav|There should be at least a full and fair trial given to these means of ending industrial warfare;
wavs/LJ029-0004.wav|The narrative of these events is based largely on the recollections of the participants,
wavs/LJ023-0122.wav|It was said in last year's Democratic platform,
wavs/LJ005-0264.wav|inspectors of prisons should be appointed, who should visit all the prisons from time to time and report to the Secretary of State.
wavs/LJ002-0105.wav|and beyond it was a room called the "wine room," because formerly used for the sale of wine, but
wavs/LJ017-0035.wav|in the interests and for the due protection of the public, that the fullest and fairest inquiry should be made,
wavs/LJ048-0252.wav|Three of these agents occupied positions on the running boards of the car, and the fourth was seated in the car.
wavs/LJ013-0109.wav|The proceeds of the robbery were lodged in a Boston bank,
wavs/LJ039-0139.wav|Oswald obtained a hunting license, joined a hunting club and went hunting about six times, as discussed more fully in chapter six.
wavs/LJ044-0047.wav|that anyone ever attacked any street demonstration in which Oswald was involved, except for the Bringuier incident mentioned above,
wavs/LJ016-0417.wav|Catherine Wilson, the poisoner, was reserved and reticent to the last, expressing no contrition, but also no fear --
wavs/LJ045-0178.wav|he left his wedding ring in a cup on the dresser in his room. He also left one hundred seventy dollars in a wallet in one of the dresser drawers.
wavs/LJ009-0172.wav|While in London, for instance, in eighteen twenty-nine, twenty-four persons had been executed for crimes other than murder,
wavs/LJ049-0202.wav|incident to its responsibilities.
wavs/LJ032-0103.wav|The name "Hidell" was stamped on some of the "Chapter's" printed literature and on the membership application blanks.
wavs/LJ013-0091.wav|and Elder had to be assisted by two bank porters, who carried it for him to a carriage waiting near the Mansion House.
wavs/LJ037-0208.wav|nineteen dollars, ninety-five cents, plus one dollar, twenty-seven cents shipping charge, had been collected from the consignee, Hidell.
wavs/LJ014-0128.wav|her hair was dressed in long crepe bands. She had lace ruffles at her wrist, and wore primrose-colored kid gloves.
wavs/LJ015-0007.wav|This affected Cole's credit, and ugly reports were in circulation charging him with the issue of simulated warrants.
wavs/LJ036-0169.wav|he would have reached his destination at approximately twelve:fifty-four p.m.
wavs/LJ021-0040.wav|The second step we have taken in the restoration of normal business enterprise
wavs/LJ015-0036.wav|The bank was already insolvent,
wavs/LJ034-0041.wav|Although Bureau experiments had shown that twenty-four hours was a likely maximum time, Latona stated
wavs/LJ009-0192.wav|The dissection of executed criminals was abolished soon after the discovery of the crime of burking,
wavs/LJ037-0248.wav|The eyewitnesses vary in their identification of the jacket.
wavs/LJ015-0289.wav|As each transaction was carried out from a different address, and a different messenger always employed,
wavs/LJ005-0072.wav|After a few years of active exertion the Society was rewarded by fresh legislation.
wavs/LJ023-0047.wav|The three horses are, of course, the three branches of government -- the Congress, the Executive and the courts.
wavs/LJ009-0126.wav|Hardly any one.
wavs/LJ034-0097.wav|The window was approximately one hundred twenty feet away.
wavs/LJ028-0462.wav|They were laid in bitumen.
wavs/LJ046-0055.wav|It is now possible for Presidents to travel the length and breadth of a land far larger than the United States
wavs/LJ019-0371.wav|Yet the law was seldom if ever enforced.
wavs/LJ039-0207.wav|Although all of the shots were a few inches high and to the right of the target,
wavs/LJ002-0174.wav|Mr. Buxton's friends at once paid the forty shillings, and the boy was released.
wavs/LJ016-0233.wav|In his own profession
wavs/LJ026-0108.wav|It is clear that there are upward and downward currents of water containing food (comparable to blood of an animal),
wavs/LJ038-0035.wav|Oswald rose from his seat, bringing up both hands.
wavs/LJ026-0148.wav|water which is lost by evaporation, especially from the leaf surface through the stomata;
wavs/LJ001-0186.wav|the position of our Society that a work of utility might be also a work of art, if we cared to make it so.
wavs/LJ016-0264.wav|The upturned faces of the eager spectators resembled those of the 'gods' at Drury Lane on Boxing Night;
wavs/LJ009-0041.wav|The occupants of this terrible black pew were the last always to enter the chapel.
wavs/LJ010-0297.wav|But there were other notorious cases of forgery.
wavs/LJ040-0018.wav|the Commission is not able to reach any definite conclusions as to whether or not he was, quote, sane, unquote, under prevailing legal standards.
wavs/LJ005-0253.wav|"to inquire into and report upon the several jails and houses of correction in the counties, cities, and corporate towns within England and Wales
wavs/LJ027-0176.wav|Fishes first appeared in the Devonian and Upper Silurian in very reptilian or rather amphibian forms.
wavs/LJ034-0035.wav|The position of this palmprint on the carton was parallel with the long axis of the box, and at right angles with the short axis;
wavs/LJ016-0054.wav|But he did not like the risk of entering a room by the fireplace, and the chances of detection it offered.
wavs/LJ018-0262.wav|Roupell received the announcement with a cheerful countenance,
wavs/LJ044-0237.wav|with thirteen dollars, eighty-seven cents when considerably greater resources were available to him.
wavs/LJ034-0166.wav|Two other witnesses were able to offer partial descriptions of a man they saw in the southeast corner window
wavs/LJ016-0238.wav|"just to steady their legs a little;" in other words, to add his weight to that of the hanging bodies.
wavs/LJ042-0198.wav|The discussion above has already set forth examples of his expression of hatred for the United States.
wavs/LJ031-0189.wav|At two:thirty-eight p.m., Eastern Standard Time, Lyndon Baines Johnson took the oath of office as the thirty-sixth President of the United States.
wavs/LJ050-0084.wav|or, quote, other high government officials in the nature of a complaint coupled with an expressed or implied determination to use a means,
wavs/LJ044-0158.wav|As for my return entrance visa please consider it separately. End quote.
wavs/LJ045-0082.wav|it appears that Marina Oswald also complained that her husband was not able to provide more material things for her.
wavs/LJ045-0190.wav|appeared in The Dallas Times Herald on November fifteen, nineteen sixty-three.
wavs/LJ035-0155.wav|The only exit from the office in the direction Oswald was moving was through the door to the front stairway.
wavs/LJ044-0004.wav|Political Activities
wavs/LJ046-0016.wav|The Commission has not undertaken a comprehensive examination of all facets of this subject;
wavs/LJ019-0368.wav|The latter too was to be laid before the House of Commons.
wavs/LJ010-0062.wav|But they proceeded in all seriousness, and would have shrunk from no outrage or atrocity in furtherance of their foolhardy enterprise.
wavs/LJ033-0159.wav|It was from Oswald's right hand, in which he carried the long package as he walked from Frazier's car to the building.
wavs/LJ002-0171.wav|The boy declared he saw no one, and accordingly passed through without paying the toll of a penny.
wavs/LJ002-0298.wav|in his evidence in eighteen fourteen, said it was more,
wavs/LJ012-0219.wav|and in one corner, at some depth, a bundle of clothes were unearthed, which, with a hairy cap,
wavs/LJ017-0190.wav|After this came the charge of administering oil of vitriol, which failed, as has been described.
wavs/LJ019-0179.wav|This, with a scheme for limiting the jail to untried prisoners, had been urgently recommended by Lord John Russell in eighteen thirty.
wavs/LJ050-0188.wav|each patrolman might be given a prepared booklet of instructions explaining what is expected of him. The Secret Service has expressed concern
wavs/LJ006-0043.wav|The disgraceful overcrowding had been partially ended, but the same evils of indiscriminate association were still present; there was the old neglect of decency,
wavs/LJ029-0060.wav|A number of people who resembled some of those in the photographs were placed under surveillance at the Trade Mart.
wavs/LJ019-0052.wav|Both systems came to us from the United States. The difference was really more in degree than in principle,
wavs/LJ037-0081.wav|Later in the day each woman found an empty shell on the ground near the house. These two shells were delivered to the police.
wavs/LJ048-0200.wav|paying particular attention to the crowd for any unusual activity.
wavs/LJ016-0426.wav|come along, gallows.
wavs/LJ008-0182.wav|A tremendous crowd assembled when Bellingham was executed in eighteen twelve for the murder of Spencer Percival, at that time prime minister;
wavs/LJ043-0107.wav|Upon moving to New Orleans on April twenty-four, nineteen sixty-three,
wavs/LJ006-0084.wav|and so numerous were his opportunities of showing favoritism, that all the prisoners may be said to be in his power.
wavs/LJ025-0081.wav|has no permanent digestive cavity or mouth, but takes in its food anywhere and digests, so to speak, all over its body.
wavs/LJ019-0042.wav|These were either satisfied with a makeshift, and modified existing buildings, without close regard to their suitability, or for a long time did nothing at all.
wavs/LJ047-0240.wav|They agree that Hosty told Revill
wavs/LJ032-0012.wav|the resistance to arrest and the attempted shooting of another police officer by the man (Lee Harvey Oswald) subsequently accused of assassinating President Kennedy
wavs/LJ050-0209.wav|The assistant to the Director of the FBI testified that
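
The entries above follow the LJSpeech-style pipe-delimited filelist format
(`<wav path>|<normalized transcript>`). A minimal parsing sketch (the filelist
name is an assumption):

# Parse an LJSpeech-style filelist into (wav_path, transcript) pairs.
with open("filelist.txt", encoding="utf-8") as f:
    entries = [line.rstrip("\n").split("|", 1) for line in f if line.strip()]
for wav_path, transcript in entries[:3]:
    print(wav_path, "->", transcript)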
|
PyTorch/Classification/GPUNet/configs/batch1/GV100 | GV100 | 0.65ms | [
{
"layer_type": "data",
"img_resolution": 320,
"distill": false
},
{
"layer_type": "head",
"num_in_channels": 3,
"num_out_channels": 32
},
{
"layer_type": "conv",
"num_in_channels": 32,
"num_out_channels": 32,
"stride": 1,
"kernel_size": 3,
"act": "relu",
"stage": 1
},
{
"layer_type": "conv",
"num_in_channels": 32,
"num_out_channels": 32,
"stride": 1,
"kernel_size": 3,
"act": "relu",
"stage": 1
},
{
"layer_type": "fused_irb",
"num_in_channels": 32,
"num_out_channels": 32,
"stride": 2,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 2
},
{
"layer_type": "fused_irb",
"num_in_channels": 32,
"num_out_channels": 32,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 2
},
{
"layer_type": "fused_irb",
"num_in_channels": 32,
"num_out_channels": 64,
"stride": 2,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 3
},
{
"layer_type": "fused_irb",
"num_in_channels": 64,
"num_out_channels": 64,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 3
},
{
"layer_type": "fused_irb",
"num_in_channels": 64,
"num_out_channels": 64,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 3
},
{
"layer_type": "irb",
"num_in_channels": 64,
"num_out_channels": 256,
"stride": 2,
"expansion": 5,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 4
},
{
"layer_type": "irb",
"num_in_channels": 256,
"num_out_channels": 256,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 4
},
{
"layer_type": "irb",
"num_in_channels": 256,
"num_out_channels": 256,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 4
},
{
"layer_type": "irb",
"num_in_channels": 256,
"num_out_channels": 704,
"stride": 2,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "irb",
"num_in_channels": 704,
"num_out_channels": 704,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "tail",
"num_in_channels": 704,
"num_out_channels": 1280,
"num_classes": 1000
}
] |
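
The listing above serializes a GPUNet architecture as a flat JSON list of layer
descriptors. A minimal inspection sketch (the file name is an assumption; key
names follow the listing):

# Load the layer list and print the channel flow of the compute layers.
import json

with open("gv100_0.65ms.json") as f:
    layers = json.load(f)

for layer in layers:
    if layer["layer_type"] in ("conv", "fused_irb", "irb"):
        print(f'{layer["layer_type"]}: {layer["num_in_channels"]} -> '
              f'{layer["num_out_channels"]}, stride {layer["stride"]}')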
Tools/PyTorch/TimeSeriesPredictionPlatform/models | models | lstm | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from apex.normalization.fused_layer_norm import FusedLayerNorm
from torch import Tensor
from models.tft_pyt.modeling import *
class LSTM(nn.Module):
"""
Implementation from LSTM portion of https://arxiv.org/abs/1912.09363
"""
def __init__(self, config):
super().__init__()
        self.encoder_steps = config.encoder_length  # determines how far into the past the model draws data from
self.mask_nans = config.missing_data_strategy == "mask"
self.embedding = TFTEmbedding(config)
self.static_encoder = StaticCovariateEncoder(config)
self.history_vsn = VariableSelectionNetwork(config, config.num_historic_vars)
self.history_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.future_vsn = VariableSelectionNetwork(config, config.num_future_vars)
self.future_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.output_proj = nn.Linear(config.hidden_size, 1)
def forward(self, x: Tensor) -> Tensor:
s_inp, t_known_inp, t_observed_inp, t_observed_tgt = self.embedding(x)
# Static context
cs, ce, ch, cc = self.static_encoder(s_inp)
ch, cc = ch.unsqueeze(0), cc.unsqueeze(0) # lstm initial states
# Temporal input
_historical_inputs = [t_known_inp[:, : self.encoder_steps, :], t_observed_tgt[:, : self.encoder_steps, :]]
if t_observed_inp is not None:
_historical_inputs.insert(0, t_observed_inp[:, : self.encoder_steps, :])
historical_inputs = torch.cat(_historical_inputs, dim=-2)
future_inputs = t_known_inp[:, self.encoder_steps :]
# Encoders
historical_features, _ = self.history_vsn(historical_inputs, cs)
history, state = self.history_encoder(historical_features, (ch, cc))
future_features, _ = self.future_vsn(future_inputs, cs)
future, _ = self.future_encoder(future_features, state)
output = self.output_proj(future)
return output
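
# Usage sketch (assumptions: `config` supplies the fields referenced above --
# encoder_length, hidden_size, num_historic_vars, num_future_vars, etc. --
# plus whatever the TFT submodules require, and `x` is the input batch
# expected by TFTEmbedding):
#
#   model = LSTM(config)
#   preds = model(x)  # one prediction per future step: [batch, horizon, 1]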
|
CUDA-Optimized/FastSpeech/fastspeech/hparams | hparams | __init__ | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text | text | abbreviations | import re
_no_period_re = re.compile(r'(No[.])(?=[ ]?[0-9])')
_percent_re = re.compile(r'([ ]?[%])')
_half_re = re.compile('([0-9]½)|(½)')
_url_re = re.compile(r'([a-zA-Z])\.(com|gov|org)')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('ms', 'miss'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
('sen', 'senator'),
('etc', 'et cetera'),
]]
def _expand_no_period(m):
word = m.group(0)
if word[0] == 'N':
return 'Number'
return 'number'
def _expand_percent(m):
return ' percent'
def _expand_half(m):
word = m.group(1)
if word is None:
return 'half'
return word[0] + ' and a half'
def _expand_urls(m):
return f'{m.group(1)} dot {m.group(2)}'
def normalize_abbreviations(text):
text = re.sub(_no_period_re, _expand_no_period, text)
text = re.sub(_percent_re, _expand_percent, text)
text = re.sub(_half_re, _expand_half, text)
text = re.sub('&', ' and ', text)
text = re.sub('@', ' at ', text)
text = re.sub(_url_re, _expand_urls, text)
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
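

# A minimal usage sketch (not part of the original module); the expected
# output follows from the regexes defined above.
if __name__ == '__main__':
    # -> doctor Smith paid 50 percent at Number 7 via example dot com
    print(normalize_abbreviations('Dr. Smith paid 50% at No. 7 via example.com'))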
|
PyTorch/SpeechSynthesis/Tacotron2/phrases | phrases | phrase_1_256 | The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit | deployment_toolkit | extensions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
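

# A minimal usage sketch (not part of the original module); `DemoRunner` is a
# hypothetical class used only for illustration.
if __name__ == "__main__":

    class DemoRunner:
        pass

    runners.register_extension("demo", DemoRunner)
    assert runners.get("demo") is DemoRunner
    print(runners.supported_extensions)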
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | running_on_mobile_tensorflowlite | # Running on mobile with TensorFlow Lite
In this section, we will show you how to use [TensorFlow
Lite](https://www.tensorflow.org/mobile/tflite/) to get a smaller model and
allow you take advantage of ops that have been optimized for mobile devices.
TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded
devices. It enables on-device machine learning inference with low latency and a
small binary size. TensorFlow Lite uses many techniques for this, such as
quantized kernels that allow smaller and faster (fixed-point math) models.
For this section, you will need to build [TensorFlow from
source](https://www.tensorflow.org/install/install_sources) to get the
TensorFlow Lite support for the SSD model. At this time only SSD models are
supported; models like faster_rcnn are not. You will also need to install the
[bazel build
tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel).
To make these commands easier to run, let’s set up some environment variables:
```shell
export CONFIG_FILE=PATH_TO_BE_CONFIGURED/pipeline.config
export CHECKPOINT_PATH=PATH_TO_BE_CONFIGURED/model.ckpt
export OUTPUT_DIR=/tmp/tflite
```
We start with a checkpoint and get a TensorFlow frozen graph with compatible ops
that we can use with TensorFlow Lite. First, you’ll need to install these
[python
libraries](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md).
Then to get the frozen graph, run the export_tflite_ssd_graph.py script from the
`models/research` directory with this command:
```shell
python object_detection/export_tflite_ssd_graph.py \
--pipeline_config_path=$CONFIG_FILE \
--trained_checkpoint_prefix=$CHECKPOINT_PATH \
--output_directory=$OUTPUT_DIR \
--add_postprocessing_op=true
```
In the /tmp/tflite directory, you should now see two files: tflite_graph.pb and
tflite_graph.pbtxt. Note that the add_postprocessing flag enables the model to
take advantage of a custom optimized detection post-processing operation which
can be thought of as a replacement for
[tf.image.non_max_suppression](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression).
Make sure not to confuse export_tflite_ssd_graph with export_inference_graph in
the same directory. Both scripts output frozen graphs: export_tflite_ssd_graph
will output the frozen graph that we can input to TensorFlow Lite directly and
is the one we’ll be using.
Next we’ll use TensorFlow Lite to get the optimized model by using
[TOCO](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/toco),
the TensorFlow Lite Optimizing Converter. This will convert the resulting frozen
graph (tflite_graph.pb) to the TensorFlow Lite flatbuffer format (detect.tflite)
via the following command. For a quantized model, run this from the tensorflow/
directory:
```shell
bazel run --config=opt tensorflow/lite/toco:toco -- \
--input_file=$OUTPUT_DIR/tflite_graph.pb \
--output_file=$OUTPUT_DIR/detect.tflite \
--input_shapes=1,300,300,3 \
--input_arrays=normalized_input_image_tensor \
--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \
--inference_type=QUANTIZED_UINT8 \
--mean_values=128 \
--std_values=128 \
--change_concat_input_ranges=false \
--allow_custom_ops
```
This command takes the input tensor normalized_input_image_tensor after resizing
each camera image frame to 300x300 pixels. The outputs of the quantized model
are named 'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', and 'TFLite_Detection_PostProcess:3' and
represent four arrays: detection_boxes, detection_classes, detection_scores, and
num_detections. The documentation for other flags used in this command is
[here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/convert/cmdline_reference.md).
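As a quick sanity check, the converted model can be exercised with the
TensorFlow Lite Python interpreter. A minimal sketch for the quantized model
(the zero-filled frame is a stand-in for real camera input, and the output
ordering matches the four arrays described above):

```python
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="/tmp/tflite/detect.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# The quantized model expects a single 300x300 RGB frame as uint8.
frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)
interpreter.set_tensor(input_details[0]["index"], frame)
interpreter.invoke()

boxes = interpreter.get_tensor(output_details[0]["index"])    # detection_boxes
classes = interpreter.get_tensor(output_details[1]["index"])  # detection_classes
scores = interpreter.get_tensor(output_details[2]["index"])   # detection_scores
count = interpreter.get_tensor(output_details[3]["index"])    # num_detections
```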
If things ran successfully, you should now see a third file in the /tmp/tflite
directory called detect.tflite. This file contains the graph and all model
parameters and can be run via the TensorFlow Lite interpreter on the Android
device. For a floating point model, run this from the tensorflow/ directory:
```shell
bazel run --config=opt tensorflow/lite/toco:toco -- \
--input_file=$OUTPUT_DIR/tflite_graph.pb \
--output_file=$OUTPUT_DIR/detect.tflite \
--input_shapes=1,300,300,3 \
--input_arrays=normalized_input_image_tensor \
--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \
--inference_type=FLOAT \
--allow_custom_ops
```
# Running our model on Android
To run our TensorFlow Lite model on device, we will need to install the Android
NDK and SDK. The current recommended Android NDK version is 14b and can be found
on the [NDK
Archives](https://developer.android.com/ndk/downloads/older_releases.html#ndk-14b-downloads)
page. Android SDK and build tools can be [downloaded
separately](https://developer.android.com/tools/revisions/build-tools.html) or
used as part of [Android
Studio](https://developer.android.com/studio/index.html). To build the
TensorFlow Lite Android demo, build tools require API >= 23 (but it will run on
devices with API >= 21). Additional details are available on the [TensorFlow
Lite Android App
page](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/java/demo/README.md).
Next we need to point the app to our new detect.tflite file and give it the
names of our new labels. Specifically, we will copy our TensorFlow Lite
flatbuffer to the app assets directory with the following command:
```shell
cp /tmp/tflite/detect.tflite \
tensorflow/lite/examples/android/app/src/main/assets
```
You will also need to copy your new labelmap labels_list.txt to the assets
directory.
We will now edit the BUILD file to point to this new model. First, open the
BUILD file tensorflow/lite/examples/android/BUILD. Then find the assets
section, and replace the line “@tflite_mobilenet_ssd_quant//:detect.tflite”
(which by default points to a COCO pretrained model) with the path to your new
TFLite model
“//tensorflow/lite/examples/android/app/src/main/assets:detect.tflite”.
Finally, change the last line in the assets section to use the new label map as
well.
We will also need to tell our app to use the new label map. In order to do this,
open up the
tensorflow/lite/examples/android/app/src/main/java/org/tensorflow/demo/DetectorActivity.java
file in a text editor and find the definition of TF_OD_API_LABELS_FILE. Update
this path to point to your new label map file:
"file:///android_asset/labels_list.txt". Note that if your model is quantized,
the flag TF_OD_API_IS_QUANTIZED is set to true, and if your model is floating
point, the flag TF_OD_API_IS_QUANTIZED is set to false. This new section of
DetectorActivity.java should now look as follows for a quantized model:
```java
private static final boolean TF_OD_API_IS_QUANTIZED = true;
private static final String TF_OD_API_MODEL_FILE = "detect.tflite";
private static final String TF_OD_API_LABELS_FILE = "file:///android_asset/labels_list.txt";
```
Once you’ve copied the TensorFlow Lite file and edited your BUILD and
DetectorActivity.java files, you can build the demo app by running this bazel
command from the tensorflow directory:
```shell
bazel build -c opt --config=android_arm{,64} --cxxopt='--std=c++11' \
  "//tensorflow/lite/examples/android:tflite_demo"
```
Now install the demo on a
[debug-enabled](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#install)
Android phone via [Android Debug
Bridge](https://developer.android.com/studio/command-line/adb) (adb):
```shell
adb install bazel-bin/tensorflow/lite/examples/android/tflite_demo.apk
```
|
CUDA-Optimized/FastSpeech/tacotron2 | tacotron2 | multiproc | # BSD 3-Clause License
# Copyright (c) 2018-2020, NVIDIA Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""https://github.com/NVIDIA/tacotron2"""
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
|
PyTorch/SpeechSynthesis/FastPitch | FastPitch | train | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import copy
import os
import time
from collections import defaultdict, OrderedDict
from itertools import cycle
import numpy as np
import torch
import torch.distributed as dist
import amp_C
from apex.optimizers import FusedAdam, FusedLAMB
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import common.tb_dllogger as logger
import models
from common.tb_dllogger import log
from common.repeated_dataloader import (RepeatedDataLoader,
RepeatedDistributedSampler)
from common.text import cmudict
from common.utils import (BenchmarkStats, Checkpointer,
load_pretrained_weights, prepare_tmp)
from fastpitch.attn_loss_function import AttentionBinarizationLoss
from fastpitch.data_function import batch_to_gpu, ensure_disjoint, TTSCollate, TTSDataset
from fastpitch.loss_function import FastPitchLoss
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str, default='./',
help='Path to dataset')
parser.add_argument('--log-file', type=str, default=None,
help='Path to a DLLogger log file')
train = parser.add_argument_group('training setup')
train.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
train.add_argument('--epochs-per-checkpoint', type=int, default=50,
help='Number of epochs per checkpoint')
train.add_argument('--checkpoint-path', type=str, default=None,
help='Checkpoint path to resume training')
train.add_argument('--keep-milestones', default=list(range(100, 1000, 100)),
type=int, nargs='+',
help='Milestone checkpoints to keep from removing')
train.add_argument('--resume', action='store_true',
help='Resume training from the last checkpoint')
train.add_argument('--seed', type=int, default=1234,
help='Seed for PyTorch random number generators')
train.add_argument('--amp', action='store_true',
help='Enable AMP')
train.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
train.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
train.add_argument('--ema-decay', type=float, default=0,
help='Discounting factor for training weights EMA')
train.add_argument('--grad-accumulation', type=int, default=1,
help='Training steps to accumulate gradients for')
train.add_argument('--kl-loss-start-epoch', type=int, default=250,
help='Start adding the hard attention loss term')
train.add_argument('--kl-loss-warmup-epochs', type=int, default=100,
help='Gradually increase the hard attention loss term')
    train.add_argument('--kl-loss-weight', type=float, default=1.0,
                       help='Weight of the hard attention loss term')
train.add_argument('--benchmark-epochs-num', type=int, default=20,
help='Number of epochs for calculating final stats')
train.add_argument('--validation-freq', type=int, default=1,
help='Validate every N epochs to use less compute')
train.add_argument('--init-from-checkpoint', type=str, default=None,
help='Initialize model weights with a pre-trained ckpt')
opt = parser.add_argument_group('optimization setup')
opt.add_argument('--optimizer', type=str, default='lamb',
help='Optimization algorithm')
opt.add_argument('-lr', '--learning-rate', type=float, required=True,
                     help='Learning rate')
opt.add_argument('--weight-decay', default=1e-6, type=float,
help='Weight decay')
opt.add_argument('--grad-clip-thresh', default=1000.0, type=float,
help='Clip threshold for gradients')
opt.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
opt.add_argument('--warmup-steps', type=int, default=1000,
help='Number of steps for lr warmup')
opt.add_argument('--dur-predictor-loss-scale', type=float,
default=1.0, help='Rescale duration predictor loss')
opt.add_argument('--pitch-predictor-loss-scale', type=float,
default=1.0, help='Rescale pitch predictor loss')
opt.add_argument('--attn-loss-scale', type=float,
default=1.0, help='Rescale alignment loss')
data = parser.add_argument_group('dataset parameters')
data.add_argument('--training-files', type=str, nargs='*', required=True,
help='Paths to training filelists.')
data.add_argument('--validation-files', type=str, nargs='*',
required=True, help='Paths to validation filelists')
data.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
data.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
data.add_argument('--p-arpabet', type=float, default=0.0,
help='Probability of using arpabets instead of graphemes '
'for each word; set 0 for pure grapheme training')
data.add_argument('--heteronyms-path', type=str, default='cmudict/heteronyms',
help='Path to the list of heteronyms')
data.add_argument('--cmudict-path', type=str, default='cmudict/cmudict-0.7b',
help='Path to the pronouncing dictionary')
data.add_argument('--prepend-space-to-text', action='store_true',
help='Capture leading silence with a space token')
data.add_argument('--append-space-to-text', action='store_true',
help='Capture trailing silence with a space token')
data.add_argument('--num-workers', type=int, default=6,
help='Subprocesses for train and val DataLoaders')
data.add_argument('--trainloader-repeats', type=int, default=100,
help='Repeats the dataset to prolong epochs')
cond = parser.add_argument_group('data for conditioning')
cond.add_argument('--n-speakers', type=int, default=1,
help='Number of speakers in the dataset. '
'n_speakers > 1 enables speaker embeddings')
cond.add_argument('--load-pitch-from-disk', action='store_true',
help='Use pitch cached on disk with prepare_dataset.py')
cond.add_argument('--pitch-online-method', default='pyin',
choices=['pyin'],
                      help='Calculate pitch on the fly during training')
cond.add_argument('--pitch-online-dir', type=str, default=None,
help='A directory for storing pitch calculated on-line')
cond.add_argument('--pitch-mean', type=float, default=214.72203,
help='Normalization value for pitch')
cond.add_argument('--pitch-std', type=float, default=65.72038,
help='Normalization value for pitch')
cond.add_argument('--load-mel-from-disk', action='store_true',
                      help='Use mel-spectrograms cached on the disk')  # XXX
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
dist = parser.add_argument_group('distributed setup')
dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc; do not set manually')
dist.add_argument('--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc; do not set manually')
return parser
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(num_gpus)
def init_distributed(args, world_size, rank):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing distributed training")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(backend=('nccl' if args.cuda else 'gloo'),
init_method='env://')
print("Done initializing distributed training")
def validate(model, epoch, total_iter, criterion, val_loader, distributed_run,
batch_to_gpu, ema=False):
was_training = model.training
model.eval()
tik = time.perf_counter()
with torch.no_grad():
val_meta = defaultdict(float)
val_num_frames = 0
for i, batch in enumerate(val_loader):
x, y, num_frames = batch_to_gpu(batch)
y_pred = model(x)
loss, meta = criterion(y_pred, y, is_training=False, meta_agg='sum')
if distributed_run:
for k, v in meta.items():
val_meta[k] += reduce_tensor(v, 1)
val_num_frames += reduce_tensor(num_frames.data, 1).item()
else:
for k, v in meta.items():
val_meta[k] += v
val_num_frames += num_frames.item()
val_meta = {k: v / len(val_loader.dataset) for k, v in val_meta.items()}
val_meta['took'] = time.perf_counter() - tik
log((epoch,) if epoch is not None else (), tb_total_steps=total_iter,
subset='val_ema' if ema else 'val',
data=OrderedDict([
('loss', val_meta['loss'].item()),
('mel_loss', val_meta['mel_loss'].item()),
('frames/s', val_num_frames / val_meta['took']),
('took', val_meta['took'])]),
)
if was_training:
model.train()
return val_meta
def adjust_learning_rate(total_iter, opt, learning_rate, warmup_iters=None):
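    # Transformer-style "Noam" schedule: the scale grows linearly during
    # warmup, peaks at 1/sqrt(warmup_iters), then decays as 1/sqrt(total_iter).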
if warmup_iters == 0:
scale = 1.0
elif total_iter > warmup_iters:
scale = 1. / (total_iter ** 0.5)
else:
scale = total_iter / (warmup_iters ** 1.5)
for param_group in opt.param_groups:
param_group['lr'] = learning_rate * scale
def apply_ema_decay(model, ema_model, decay):
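    # In-place exponential moving average: v_ema = decay * v_ema + (1 - decay) * v.
    # Also handles the 'module.' key prefix present once `model` is DDP-wrapped.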
if not decay:
return
st = model.state_dict()
add_module = hasattr(model, 'module') and not hasattr(ema_model, 'module')
for k, v in ema_model.state_dict().items():
if add_module and not k.startswith('module.'):
k = 'module.' + k
v.copy_(decay * v + (1 - decay) * st[k])
def init_multi_tensor_ema(model, ema_model):
model_weights = list(model.state_dict().values())
ema_model_weights = list(ema_model.state_dict().values())
ema_overflow_buf = torch.cuda.IntTensor([0])
return model_weights, ema_model_weights, ema_overflow_buf
def apply_multi_tensor_ema(decay, model_weights, ema_weights, overflow_buf):
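    # Fused EMA update over all weights in a single kernel launch:
    # ema = decay * ema + (1 - decay) * model; 65536 is the chunk size.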
amp_C.multi_tensor_axpby(
65536, overflow_buf, [ema_weights, model_weights, ema_weights],
decay, 1-decay, -1)
def main():
parser = argparse.ArgumentParser(description='PyTorch FastPitch Training',
allow_abbrev=False)
parser = parse_args(parser)
args, _ = parser.parse_known_args()
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
distributed_run = args.world_size > 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
if args.local_rank == 0:
if not os.path.exists(args.output):
os.makedirs(args.output)
log_fpath = args.log_file or os.path.join(args.output, 'nvlog.json')
tb_subsets = ['train', 'val']
if args.ema_decay > 0.0:
tb_subsets.append('val_ema')
logger.init(log_fpath, args.output, enabled=(args.local_rank == 0),
tb_subsets=tb_subsets)
logger.parameters(vars(args), tb_subset='train')
parser = models.parse_model_args('FastPitch', parser)
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
else:
if args.trainloader_repeats > 1:
print('WARNING: Disabled --trainloader-repeats, supported only for'
' multi-GPU data loading.')
args.trainloader_repeats = 1
device = torch.device('cuda' if args.cuda else 'cpu')
model_config = models.get_model_config('FastPitch', args)
model = models.get_model('FastPitch', model_config, device)
if args.init_from_checkpoint is not None:
load_pretrained_weights(model, args.init_from_checkpoint)
attention_kl_loss = AttentionBinarizationLoss()
# Store pitch mean/std as params to translate from Hz during inference
model.pitch_mean[0] = args.pitch_mean
model.pitch_std[0] = args.pitch_std
kw = dict(lr=args.learning_rate, betas=(0.9, 0.98), eps=1e-9,
weight_decay=args.weight_decay)
if args.optimizer == 'adam':
optimizer = FusedAdam(model.parameters(), **kw)
elif args.optimizer == 'lamb':
optimizer = FusedLAMB(model.parameters(), **kw)
else:
        raise ValueError(f'Unknown optimizer: {args.optimizer}')
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
if args.ema_decay > 0:
ema_model = copy.deepcopy(model)
else:
ema_model = None
if distributed_run:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
find_unused_parameters=True)
train_state = {'epoch': 1, 'total_iter': 1}
checkpointer = Checkpointer(args.output, args.keep_milestones)
checkpointer.maybe_load(model, optimizer, scaler, train_state, args,
ema_model)
start_epoch = train_state['epoch']
total_iter = train_state['total_iter']
criterion = FastPitchLoss(
dur_predictor_loss_scale=args.dur_predictor_loss_scale,
pitch_predictor_loss_scale=args.pitch_predictor_loss_scale,
attn_loss_scale=args.attn_loss_scale)
collate_fn = TTSCollate()
if args.local_rank == 0:
prepare_tmp(args.pitch_online_dir)
trainset = TTSDataset(audiopaths_and_text=args.training_files, **vars(args))
valset = TTSDataset(audiopaths_and_text=args.validation_files, **vars(args))
ensure_disjoint(trainset, valset)
if distributed_run:
train_sampler = RepeatedDistributedSampler(args.trainloader_repeats,
trainset, drop_last=True)
val_sampler = DistributedSampler(valset)
shuffle = False
else:
train_sampler, val_sampler, shuffle = None, None, True
# 4 workers are optimal on DGX-1 (from epoch 2 onwards)
kw = {'num_workers': args.num_workers, 'batch_size': args.batch_size,
'collate_fn': collate_fn}
train_loader = RepeatedDataLoader(args.trainloader_repeats, trainset,
shuffle=shuffle, drop_last=True,
sampler=train_sampler, pin_memory=True,
persistent_workers=True, **kw)
val_loader = DataLoader(valset, shuffle=False, sampler=val_sampler,
pin_memory=False, **kw)
if args.ema_decay:
mt_ema_params = init_multi_tensor_ema(model, ema_model)
model.train()
bmark_stats = BenchmarkStats()
torch.cuda.synchronize()
for epoch in range(start_epoch, args.epochs + 1):
epoch_start_time = time.perf_counter()
epoch_loss = 0.0
epoch_mel_loss = 0.0
epoch_num_frames = 0
epoch_frames_per_sec = 0.0
if distributed_run:
train_loader.sampler.set_epoch(epoch)
iter_loss = 0
iter_num_frames = 0
iter_meta = {}
iter_start_time = time.perf_counter()
epoch_iter = 1
for batch, accum_step in zip(train_loader,
cycle(range(1, args.grad_accumulation + 1))):
if accum_step == 1:
adjust_learning_rate(total_iter, optimizer, args.learning_rate,
args.warmup_steps)
model.zero_grad(set_to_none=True)
x, y, num_frames = batch_to_gpu(batch)
with torch.cuda.amp.autocast(enabled=args.amp):
y_pred = model(x)
loss, meta = criterion(y_pred, y)
if (args.kl_loss_start_epoch is not None
and epoch >= args.kl_loss_start_epoch):
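                    # Ramp the attention binarization (KL) term linearly from
                    # 0 to kl_loss_weight over kl_loss_warmup_epochs epochs.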
if args.kl_loss_start_epoch == epoch and epoch_iter == 1:
print('Begin hard_attn loss')
_, _, _, _, _, _, _, _, attn_soft, attn_hard, _, _ = y_pred
binarization_loss = attention_kl_loss(attn_hard, attn_soft)
kl_weight = min((epoch - args.kl_loss_start_epoch) / args.kl_loss_warmup_epochs, 1.0) * args.kl_loss_weight
meta['kl_loss'] = binarization_loss.clone().detach() * kl_weight
loss += kl_weight * binarization_loss
else:
meta['kl_loss'] = torch.zeros_like(loss)
kl_weight = 0
binarization_loss = 0
loss /= args.grad_accumulation
meta = {k: v / args.grad_accumulation
for k, v in meta.items()}
if args.amp:
scaler.scale(loss).backward()
else:
loss.backward()
if distributed_run:
reduced_loss = reduce_tensor(loss.data, args.world_size).item()
reduced_num_frames = reduce_tensor(num_frames.data, 1).item()
meta = {k: reduce_tensor(v, args.world_size) for k, v in meta.items()}
else:
reduced_loss = loss.item()
reduced_num_frames = num_frames.item()
if np.isnan(reduced_loss):
raise Exception("loss is NaN")
iter_loss += reduced_loss
iter_num_frames += reduced_num_frames
iter_meta = {k: iter_meta.get(k, 0) + meta.get(k, 0) for k in meta}
if accum_step % args.grad_accumulation == 0:
logger.log_grads_tb(total_iter, model)
if args.amp:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
scaler.step(optimizer)
scaler.update()
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
optimizer.step()
if args.ema_decay > 0.0:
apply_multi_tensor_ema(args.ema_decay, *mt_ema_params)
iter_mel_loss = iter_meta['mel_loss'].item()
iter_kl_loss = iter_meta['kl_loss'].item()
iter_time = time.perf_counter() - iter_start_time
epoch_frames_per_sec += iter_num_frames / iter_time
epoch_loss += iter_loss
epoch_num_frames += iter_num_frames
epoch_mel_loss += iter_mel_loss
num_iters = len(train_loader) // args.grad_accumulation
log((epoch, epoch_iter, num_iters), tb_total_steps=total_iter,
subset='train', data=OrderedDict([
('loss', iter_loss),
('mel_loss', iter_mel_loss),
('kl_loss', iter_kl_loss),
('kl_weight', kl_weight),
('frames/s', iter_num_frames / iter_time),
('took', iter_time),
('lrate', optimizer.param_groups[0]['lr'])]),
)
iter_loss = 0
iter_num_frames = 0
iter_meta = {}
iter_start_time = time.perf_counter()
if epoch_iter == num_iters:
break
epoch_iter += 1
total_iter += 1
# Finished epoch
epoch_loss /= epoch_iter
epoch_mel_loss /= epoch_iter
epoch_time = time.perf_counter() - epoch_start_time
log((epoch,), tb_total_steps=None, subset='train_avg',
data=OrderedDict([
('loss', epoch_loss),
('mel_loss', epoch_mel_loss),
('frames/s', epoch_num_frames / epoch_time),
('took', epoch_time)]),
)
bmark_stats.update(epoch_num_frames, epoch_loss, epoch_mel_loss,
epoch_time)
if epoch % args.validation_freq == 0:
validate(model, epoch, total_iter, criterion, val_loader,
distributed_run, batch_to_gpu)
if args.ema_decay > 0:
validate(ema_model, epoch, total_iter, criterion, val_loader,
distributed_run, batch_to_gpu, ema=True)
# save before making sched.step() for proper loading of LR
checkpointer.maybe_save(args, model, ema_model, optimizer, scaler,
epoch, total_iter, model_config)
logger.flush()
# Finished training
if len(bmark_stats) > 0:
log((), tb_total_steps=None, subset='train_avg',
data=bmark_stats.get(args.benchmark_epochs_num))
validate(model, None, total_iter, criterion, val_loader, distributed_run,
batch_to_gpu)
if __name__ == '__main__':
main()
|
TensorFlow/Recommendation/WideAndDeep/scripts | scripts | DGXA100_benchmark_training_tf32_8gpu | #!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
set -e
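# Launch 8 single-GPU Horovod workers (--hvd) and run the TF32 training
# benchmark: 500 warmup steps followed by 1000 measured steps.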
mpiexec --allow-run-as-root --bind-to socket -np 8 \
python -m trainer.task \
--hvd \
--benchmark_warmup_steps 500 \
--benchmark_steps 1000 \
--gpu \
--benchmark
|
PyTorch/Classification/GPUNet/triton/runner/maintainer | maintainer | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container # noqa: F401
from .docker.maintainer import DockerMaintainer # noqa: F401
from .maintainer import Maintainer # noqa: F401
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_resnet_v1_ppn_feature_extractor_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 feature extractors."""
import tensorflow as tf
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor_testbase
class SSDResnet50V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet50v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return ssd_resnet_v1_ppn_feature_extractor.SSDResnet50V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def _scope_name(self):
return 'resnet_v1_50'
class SSDResnet101V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet101v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_ppn_feature_extractor.SSDResnet101V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _scope_name(self):
return 'resnet_v1_101'
class SSDResnet152V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet152v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_ppn_feature_extractor.SSDResnet152V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _scope_name(self):
return 'resnet_v1_152'
if __name__ == '__main__':
tf.test.main()
|
Kaldi/SpeechRecognition/scripts/docker | docker | launch_download | #!/bin/bash
# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Start Triton server container for download - need some kaldi tools
docker run --rm \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-v $PWD/data:/mnt/data \
triton_kaldi_server /workspace/scripts/docker/dataset_setup.sh $(id -u) $(id -g)
# --user $(id -u):$(id -g) \
|
PyTorch/Classification/ConvNets/triton | triton | run_online_performance_test_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ]
use `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
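    # Sum the per-stage latencies reported by perf_client (in microseconds)
    # into one end-to-end average; stages missing from the row count as 0.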
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
|
PyTorch/LanguageModeling/BERT/distillation/BERT_4L_312D | BERT_4L_312D | config | {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 312,
"initializer_range": 0.02,
"intermediate_size": 1200,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 4,
"type_vocab_size": 2,
"vocab_size": 30528
}
|
PyTorch/Recommendation/DLRM/tests | tests | test_custom_dot | #!/bin/bash
NAMES=${1:-'*.yaml'}
COMMON_OPTS="--embedding_type=joint_sparse"
bash test_with_opts.sh "${NAMES}" "${COMMON_OPTS}"
#
# usage:
# docker build . -t nvidia_dlrm_pyt
# docker run --security-opt seccomp=unconfined --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_dlrm_pyt bash
# cd tests
# bash test_custom_dot.sh |
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
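    # e.g. "avg latency" or "avg_latency" -> "Avg Latency"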
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/dataset | dataset | electricity | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: data.datasets.create_datasets
config:
graph: False
source_path: /workspace/datasets/electricity/electricity.csv
dest_path: /workspace/datasets/electricity/
time_ids: 'days_from_start'
train_range:
- 0
- 1315
valid_range:
- 1308
- 1339
test_range:
- 1332
- 10000
dataset_stride: 1
scale_per_id: True
encoder_length: 168
example_length: 192
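  # 192-step windows = 168 encoder steps + a 24-step prediction horizon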
MultiID: False
features:
- name: 'categorical_id'
feature_type: 'ID'
feature_embed_type: 'CATEGORICAL'
cardinality: 371
- name: 'hours_from_start'
feature_type: 'TIME'
feature_embed_type: 'CONTINUOUS'
- name: 'power_usage_weight'
feature_type: 'WEIGHT'
feature_embed_type: 'CONTINUOUS'
- name: 'power_usage'
feature_type: 'TARGET'
feature_embed_type: 'CONTINUOUS'
scaler:
_target_: sklearn.preprocessing.StandardScaler
- name: 'hour'
feature_type: 'KNOWN'
feature_embed_type: 'CATEGORICAL'
cardinality: 25
- name: 'day_of_week'
feature_type: 'KNOWN'
feature_embed_type: 'CATEGORICAL'
cardinality: 8
- name: 'hours_from_start'
feature_type: 'KNOWN'
feature_embed_type: 'CONTINUOUS'
scaler:
_target_: sklearn.preprocessing.StandardScaler
- name: 'categorical_id'
feature_type: 'STATIC'
feature_embed_type: 'CATEGORICAL'
cardinality: 371
train_samples: 450000
valid_samples: 50000
binarized: True
time_series_count: 369
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | feature_map_generators_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature map generators."""
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import feature_map_generators
from object_detection.protos import hyperparams_pb2
INCEPTION_V2_LAYOUT = {
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 256],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'layer_target_norm': [20.0, -1, -1, -1, -1, -1],
}
INCEPTION_V3_LAYOUT = {
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3]
}
EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, -1, 512, 256, 256],
'conv_kernel_size': [-1, -1, 3, 3, 2],
}
SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
'from_layer': ['Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, 256, 256, 256],
}
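# In the layouts above, an empty string in 'from_layer' tells the generator to
# create a new feature map on top of the previous one, and a layer_depth of -1
# means "keep the depth of the corresponding source layer".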
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_map_generator(self, feature_map_layout, use_keras,
pool_residual=False):
if use_keras:
return feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
freeze_batchnorm=False,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
name='FeatureMaps'
)
else:
def feature_map_generator(image_features):
return feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
image_features=image_features,
pool_residual=pool_residual)
return feature_map_generator
def test_get_expected_feature_map_shapes_with_inception_v2(self, use_keras):
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT,
use_keras=use_keras
)
feature_maps = feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
# TODO(kaftan): Remove conditional after CMLE moves to TF 1.10
def test_get_expected_feature_map_shapes_use_explicit_padding(
self, use_keras):
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_explicit_padding'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy,
use_keras=use_keras
)
feature_maps = feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_inception_v3(self, use_keras):
image_features = {
'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V3_LAYOUT,
use_keras=use_keras
)
feature_maps = feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_5d': (4, 35, 35, 256),
'Mixed_6e': (4, 17, 17, 576),
'Mixed_7c': (4, 8, 8, 1024),
'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
self, use_keras):
image_features = {
'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
dtype=tf.float32),
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
use_keras=use_keras
)
feature_maps = feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_11_pointwise': (4, 16, 16, 512),
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
self, use_keras):
image_features = {
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
use_keras=use_keras,
pool_residual=True
)
feature_maps = feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names_with_inception_v2(self, use_keras):
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT,
use_keras=use_keras
)
feature_maps = feature_map_generator(image_features)
expected_slim_variables = set([
'Mixed_5c_1_Conv2d_3_1x1_256/weights',
'Mixed_5c_1_Conv2d_3_1x1_256/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
'Mixed_5c_1_Conv2d_4_1x1_128/weights',
'Mixed_5c_1_Conv2d_4_1x1_128/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
'Mixed_5c_1_Conv2d_5_1x1_128/weights',
'Mixed_5c_1_Conv2d_5_1x1_128/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
])
expected_keras_variables = set([
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
])
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
if use_keras:
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
self.assertSetEqual(expected_slim_variables, actual_variable_set)
# TODO(kaftan): Remove conditional after CMLE moves to TF 1.10
class FPNFeatureMapGeneratorTest(tf.test.TestCase):
def test_get_expected_feature_map_shapes(self):
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_maps = feature_map_generators.fpn_top_down_feature_maps(
image_features=image_features, depth=128)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_depthwise(self):
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_maps = feature_map_generators.fpn_top_down_feature_maps(
image_features=image_features, depth=128, use_depthwise=True)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
class GetDepthFunctionTest(tf.test.TestCase):
def test_return_min_depth_when_multiplier_is_small(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
self.assertEqual(depth_fn(16), 16)
def test_return_correct_depth_with_multiplier(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
self.assertEqual(depth_fn(64), 32)
@parameterized.parameters(
{'replace_pool_with_conv': False},
{'replace_pool_with_conv': True},
)
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):
def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'MaxPool2d_0_2x2': (4, 10, 10, 1024),
'MaxPool2d_1_2x2': (4, 5, 5, 1024),
'MaxPool2d_2_2x2': (4, 3, 3, 1024),
'MaxPool2d_3_2x2': (4, 2, 2, 1024),
'MaxPool2d_4_2x2': (4, 1, 1, 1024),
}
expected_conv_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024),
'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024),
'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024),
'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024),
'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024),
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
if replace_pool_with_conv:
self.assertDictEqual(expected_conv_feature_map_shapes,
out_feature_map_shapes)
else:
self.assertDictEqual(expected_pool_feature_map_shapes,
out_feature_map_shapes)
def test_get_expected_variable_names(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
])
expected_conv_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
'Conv2d_0_3x3_s2_1024/weights',
'Conv2d_0_3x3_s2_1024/biases',
'Conv2d_1_3x3_s2_1024/weights',
'Conv2d_1_3x3_s2_1024/biases',
'Conv2d_2_3x3_s2_1024/weights',
'Conv2d_2_3x3_s2_1024/biases',
'Conv2d_3_3x3_s2_1024/weights',
'Conv2d_3_3x3_s2_1024/biases',
'Conv2d_4_3x3_s2_1024/weights',
'Conv2d_4_3x3_s2_1024/biases',
])
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
if replace_pool_with_conv:
self.assertSetEqual(expected_conv_variables, actual_variable_set)
else:
self.assertSetEqual(expected_pool_variables, actual_variable_set)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/dataset | dataset | __init__ | from .dataset import Dataset
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | test_infer | #!/bin/bash
BATCH_SIZE=1
INPUT_LENGTH=128
NUM_ITERS=1003 # extra 3 iterations for warmup
TACOTRON2_CKPT="tacotron2_1032590_6000_amp"
WAVEGLOW_CKPT="waveglow_1076430_14000_amp"
RUN_MODE="" # = fp32
LOG_RUN_MODE="gpu_fp32"
TEST_PROGRAM="test_infer.py"
WN_CHANNELS=512
LOG_SUFFIX_ADD="" #additional info, e.g., GPU type
while [ -n "$1" ]
do
case "$1" in
-bs|--batch-size)
BATCH_SIZE="$2"
shift
;;
-il|--input-length)
INPUT_LENGTH="$2"
shift
;;
--num-iters)
NUM_ITERS="$2"
shift
;;
--test)
TEST_PROGRAM="$2"
shift
;;
--tacotron2)
TACOTRON2_CKPT="$2"
shift
;;
--encoder)
ENCODER_CKPT="$2"
shift
;;
--decoder)
DECODER_CKPT="$2"
shift
;;
--postnet)
POSTNET_CKPT="$2"
shift
;;
--waveglow)
WAVEGLOW_CKPT="$2"
shift
;;
--wn-channels)
WN_CHANNELS="$2"
shift
;;
--cpu)
RUN_MODE="--cpu"
LOG_RUN_MODE="cpu_fp32"
;;
--fp16)
RUN_MODE="--fp16"
LOG_RUN_MODE="gpu_fp16"
;;
--log-suffix)
LOG_SUFFIX_ADD="$2"
shift
;;
*)
echo "Option $1 not recognized"
esac
shift
done
LOG_SUFFIX=bs${BATCH_SIZE}_il${INPUT_LENGTH}_${LOG_RUN_MODE}_wn${WN_CHANNELS}_${LOG_SUFFIX_ADD}
NVLOG_FILE=nvlog_${LOG_SUFFIX}.json
TMP_LOGFILE=tmp_log_${LOG_SUFFIX}.log
LOGFILE=log_${LOG_SUFFIX}.log
if [ "$TEST_PROGRAM" = "tensorrt/test_infer_trt.py" ]
then
TACOTRON2_PARAMS="--encoder $ENCODER_CKPT --decoder $DECODER_CKPT --postnet $POSTNET_CKPT"
else
TACOTRON2_PARAMS="--tacotron2 $TACOTRON2_CKPT"
fi
set -x
python $TEST_PROGRAM \
$TACOTRON2_PARAMS \
--waveglow $WAVEGLOW_CKPT \
--batch-size $BATCH_SIZE \
--input-length $INPUT_LENGTH \
--log-file $NVLOG_FILE \
--num-iters $NUM_ITERS \
--wn-channels $WN_CHANNELS \
$RUN_MODE \
|& tee $TMP_LOGFILE
set +x
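# Scrape throughput and latency statistics from the captured log and emit a
# single CSV-style summary line.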
PERF=$(cat $TMP_LOGFILE | grep -F 'Throughput average (samples/sec)' | awk -F'= ' '{print $2}')
NUM_MELS=$(cat $TMP_LOGFILE | grep -F 'Number of mels per audio average' | awk -F'= ' '{print $2}')
LATENCY=$(cat $TMP_LOGFILE | grep -F 'Latency average (seconds)' | awk -F'= ' '{print $2}')
LATENCYSTD=$(cat $TMP_LOGFILE | grep -F 'Latency std (seconds)' | awk -F'= ' '{print $2}')
LATENCY50=$(cat $TMP_LOGFILE | grep -F 'Latency cl 50 (seconds)' | awk -F'= ' '{print $2}')
LATENCY90=$(cat $TMP_LOGFILE | grep -F 'Latency cl 90 (seconds)' | awk -F'= ' '{print $2}')
LATENCY95=$(cat $TMP_LOGFILE | grep -F 'Latency cl 95 (seconds)' | awk -F'= ' '{print $2}')
LATENCY99=$(cat $TMP_LOGFILE | grep -F 'Latency cl 99 (seconds)' | awk -F'= ' '{print $2}')
echo "$BATCH_SIZE,$INPUT_LENGTH,$LOG_RUN_MODE,$NUM_ITERS,$LATENCY,$LATENCYSTD,$LATENCY50,$LATENCY90,$LATENCY95,$LATENCY99,$PERF,$NUM_MELS" | tee $LOGFILE
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin | taco2ModulationRemovalPlugin | taco2ModulationRemovalLayerPluginCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_MODULATIONREMOVALLAYERPLUGINCREATOR_H
#define TT2I_MODULATIONREMOVALLAYERPLUGINCREATOR_H
#include "NvInfer.h"
#include <string>
#ifdef DEVEL
// The destructor of nvinfer1::IPluginCreator is non-virtual and public, so
// we need to suppress the warning.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#endif
namespace nvinfer1
{
namespace plugin
{
class Taco2ModulationRemovalLayerPluginCreator : public nvinfer1::IPluginCreator
{
public:
/**
* @brief Get the collection of fields for this plugin, with their names only.
*
* @return The collection of fields.
*/
static nvinfer1::PluginFieldCollection* getFields();
/**
* @brief Create a new Taco2ModulationRemovalLayerPluginCreator.
*/
Taco2ModulationRemovalLayerPluginCreator();
/**
* @brief Get the name of the plugin.
*
* @return The name of the plugin.
*/
const char* getPluginName() const override;
/**
* @brief Get the plugin version.
*
* @return The plugin version.
*/
const char* getPluginVersion() const override;
/**
* @brief Get the collection of fields for this plugin.
*
* @return The collection of fields.
*/
const nvinfer1::PluginFieldCollection* getFieldNames() override;
/**
* @brief Create a new Taco2ModulationRemovalLayerPlugin.
*
* @param name The name (unused currently).
* @param fc The collection of fields to initialize with.
*
* @return The created plugin.
*/
nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override;
/**
* @brief Create a custom layer by name from a data stream.
*
 * @param name The name of the layer.
* @param serialData The serialized data for the layer.
* @param serialLength The length of the serialized data.
*
* @return The plugin. Clients must destroy the plugin once all consumers of
* it have been destroyed.
*/
nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override;
/**
* @brief Set the namespace for created plugins.
*
* @param pluginNamespace The namespace.
*/
void setPluginNamespace(const char* pluginNamespace) override;
/**
* @brief Get the namespace for created plugins.
*
* @return The namespace.
*/
const char* getPluginNamespace() const override;
private:
std::string mNamespace;
};
} // namespace plugin
} // namespace nvinfer1
#ifdef DEVEL
#pragma GCC diagnostic pop
#endif
#endif
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | decoderInstancePlain | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "decoderInstancePlain.h"
#include "cudaUtils.h"
#include "trtUtils.h"
#include <algorithm>
#include <cuda_runtime.h>
#include <numeric>
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
DecoderInstancePlain::DecoderInstancePlain(
TRTPtr<ICudaEngine> engine, const int maxChunkSize) :
DecoderInstance(std::move(engine), maxChunkSize),
mBinding(),
mInputWeightsDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_WEIGHTS_NAME)),
mOutputWeightsDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_WEIGHTS_NAME)),
mInAttentionHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_ATTENTIONHIDDEN_NAME)),
mInAttentionCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_ATTENTIONCELL_NAME)),
mOutAttentionHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_ATTENTIONHIDDEN_NAME)),
mOutAttentionCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_ATTENTIONCELL_NAME)),
mInputAttentionContextDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_CONTEXT_NAME)),
mOutputAttentionContextDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_CONTEXT_NAME)),
mInDecoderHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_DECODERHIDDEN_NAME)),
mInDecoderCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), INPUT_DECODERCELL_NAME)),
mOutDecoderHiddenStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_DECODERHIDDEN_NAME)),
mOutDecoderCellStatesDevice(
getMaxBatchSize()
* TRTUtils::getBindingSize(getEngine(), OUTPUT_DECODERCELL_NAME))
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void DecoderInstancePlain::reset(cudaStream_t stream)
{
DecoderInstance::reset(stream);
mInputWeightsDevice.zeroAsync(stream);
mInAttentionHiddenStatesDevice.zeroAsync(stream);
mInAttentionCellStatesDevice.zeroAsync(stream);
mInputAttentionContextDevice.zeroAsync(stream);
mOutputAttentionContextDevice.zeroAsync(stream);
mInDecoderHiddenStatesDevice.zeroAsync(stream);
mInDecoderCellStatesDevice.zeroAsync(stream);
}
/******************************************************************************
* PROTECTED METHODS **********************************************************
*****************************************************************************/
void DecoderInstancePlain::decode(cudaStream_t stream, IExecutionContext& context, const int batchSize,
const float* const inputLastFrameDevice, const float* const inputMemoryDevice,
const float* const inputProcessedMemoryDevice, const float* const inputMaskDevice,
const int32_t* const /* inputLengthHost */, const int32_t* const inputLengthDevice,
const float* const inputDropoutDevice, float* const outputChannelsDevice)
{
const ICudaEngine& engine = context.getEngine();
mBinding.setBinding(engine, INPUT_MASK_NAME, inputMaskDevice);
mBinding.setBinding(engine, INPUT_LENGTH_NAME, inputLengthDevice);
mBinding.setBinding(engine, INPUT_DROPOUT_NAME, inputDropoutDevice);
mBinding.setBinding(engine, INPUT_MEMORY_NAME, inputMemoryDevice);
mBinding.setBinding(engine, INPUT_PROCESSED_NAME, inputProcessedMemoryDevice);
mBinding.setBinding(engine, INPUT_WEIGHTS_NAME, mInputWeightsDevice.data());
mBinding.setBinding(engine, INPUT_LASTFRAME_NAME, inputLastFrameDevice);
mBinding.setBinding(engine, INPUT_CONTEXT_NAME, mInputAttentionContextDevice.data());
mBinding.setBinding(engine, INPUT_ATTENTIONHIDDEN_NAME, mInAttentionHiddenStatesDevice.data());
mBinding.setBinding(engine, INPUT_ATTENTIONCELL_NAME, mInAttentionCellStatesDevice.data());
mBinding.setBinding(engine, INPUT_DECODERHIDDEN_NAME, mInDecoderHiddenStatesDevice.data());
mBinding.setBinding(engine, INPUT_DECODERCELL_NAME, mInDecoderCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_CONTEXT_NAME, mOutputAttentionContextDevice.data());
mBinding.setBinding(engine, OUTPUT_WEIGHTS_NAME, mOutputWeightsDevice.data());
mBinding.setBinding(engine, OUTPUT_ATTENTIONHIDDEN_NAME, mOutAttentionHiddenStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_ATTENTIONCELL_NAME, mOutAttentionCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_DECODERHIDDEN_NAME, mOutDecoderHiddenStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_DECODERCELL_NAME, mOutDecoderCellStatesDevice.data());
mBinding.setBinding(engine, OUTPUT_CHANNELS_NAME, outputChannelsDevice);
if (!context.enqueue(batchSize, mBinding.getBindings(), stream, nullptr))
{
throw std::runtime_error("Failed to run decoder.");
}
// swap pointers
std::swap(mInputWeightsDevice, mOutputWeightsDevice);
std::swap(mInputAttentionContextDevice, mOutputAttentionContextDevice);
std::swap(mInAttentionHiddenStatesDevice, mOutAttentionHiddenStatesDevice);
std::swap(mInAttentionCellStatesDevice, mOutAttentionCellStatesDevice);
std::swap(mInDecoderHiddenStatesDevice, mOutDecoderHiddenStatesDevice);
std::swap(mInDecoderCellStatesDevice, mOutDecoderCellStatesDevice);
// required because of LSTM cells
CudaUtils::sync(stream);
}
} // namespace tts
|
Tools/DGLPyTorch/SyntheticGraphGeneration | SyntheticGraphGeneration | README | # Synthetic Graph Generation
This repository implements a tool for generating graphs of arbitrary size, including node and edge tabular features.
## Table Of Contents
- [Solution overview](#solution-overview)
* [Synthetic Graph Generation architecture](#synthetic-graph-generation-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Models](#models)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Repository structure](#repository-structure)
* [Important scripts and files](#important-scripts-and-files)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Define the synthesizer pipeline](#define-the-synthesizer-pipeline)
* [Getting the data](#getting-the-data)
+ [List of datasets](#list-of-datasets)
- [Performance](#Performance)
* [Results](#results)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
- [Reference](#reference)
* [Cite](#cite)
## Solution overview
Synthetic data generation has become pervasive with the exploding amount of data and the demand to deploy machine learning models that leverage such data. There has been increasing interest in applying graph-based neural network models to graph datasets, though many public datasets are of a much smaller scale than those used in real-world applications. Synthetic graph generation is a common problem in multiple domains with various applications, including the generation of big graphs with properties similar to the original, and the anonymization of data that cannot be shared. The Synthetic Graph Generation tool enables users to generate arbitrary graphs based on provided real data.
### Synthetic Graph Generation architecture
The tool has the following architecture.

The module is composed of three parts: a structural generator, which fits the graph structure; a feature generator, which fits the feature distribution contained in the graph; and finally an aligner, which aligns the generated features with the generated graph structure.
#### Graph structural generator
The graph structural generator fits the graph structure and generates a corresponding graph containing nodes and edges.
#### Feature generator
The feature generator fits the feature distribution contained in the graph and generates the corresponding features.
Users have the option to generate features associated with nodes, edges, or both.
#### Aligner
The aligner aligns the generated features taken from the feature generator with the graph structure produced by the structural generator.
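As a mental model, each component is fitted on the real data and then used to generate, with alignment as the final step. Below is a minimal, self-contained NumPy sketch of this fit-generate-align flow on toy data; all names are illustrative and none of them belong to the SynGen API:
```
import numpy as np

rng = np.random.default_rng(42)

# "Real" toy input: 100 edges among 20 nodes, plus one numeric feature per node.
real_edges = rng.integers(0, 20, size=(100, 2))
real_feats = rng.normal(loc=5.0, scale=2.0, size=20)

# 1. Structural generator: a trivial random (Erdos-Renyi-style) stand-in that
#    keeps the node and edge counts of real_edges.
syn_edges = rng.integers(0, 20, size=real_edges.shape)

# 2. Feature generator: fit a Gaussian to the real features and sample from it.
syn_feats = rng.normal(real_feats.mean(), real_feats.std(), size=real_feats.size)

# 3. Aligner: give larger features to higher-degree nodes, mimicking a
#    degree/feature correlation (the tool uses an XGBoost-based aligner instead).
degrees = np.bincount(syn_edges.ravel(), minlength=20)
aligned = np.empty_like(syn_feats)
aligned[np.argsort(degrees)] = np.sort(syn_feats)
print(aligned.round(2))
```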
### Feature support matrix
This tool supports the following features:
| Feature | Synthetic Graph Generation |
|------------------------------|----------------------------|
| Non-partite graph generation | Yes |
| N-partite graph generation | Yes |
| Undirected graph generation | Yes |
| Directed graph generation | Yes |
| Self-loops generation | Yes |
| Edge features generation | Yes |
| Node features generation | Yes |
#### Features
* Non-partite graph generation is a task to generate a graph that doesn't contain any explicit partites (disjoint and independent sets of nodes).
* N-partite graph generation is a task to generate a graph that consists of an arbitrary number of partites.
* Undirected graph generation is a task to generate a graph made up of a set of vertices connected by unordered edges.
* Directed graph generation is a task to generate a graph made up of a set of vertices connected by directed edges.
* Self-loops generation is a task to generate edges that connect a vertex to itself.
* Edge features generation is a task to generate features associated with an edge.
* Node features generation is a task to generate features associated with a node.
### Models
Structural graph generation
```
- RMAT
- Random (Erdos-Renyi)
```
Tabular features
```
- KDE
- Gaussian
- Uniform
- Random
- CTGAN (Conditional GAN)
```
Aligner
```
- XGBoost
```
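For intuition, the snippet below sketches the classic R-MAT recursive edge-sampling idea in plain NumPy, using the quadrant probabilities commonly quoted in the literature; it is illustrative only and not the accelerated implementation shipped with the tool:
```
import numpy as np

def rmat_edges(scale, n_edges, probs=(0.57, 0.19, 0.19, 0.05), seed=0):
    """Sample edges for a graph with 2**scale nodes by recursively
    choosing one of four adjacency-matrix quadrants per bit."""
    rng = np.random.default_rng(seed)
    src = np.zeros(n_edges, dtype=np.int64)
    dst = np.zeros(n_edges, dtype=np.int64)
    for _ in range(scale):
        q = rng.choice(4, size=n_edges, p=probs)  # quadrant choice per edge
        src = (src << 1) | (q >> 1)  # quadrants 2 and 3 pick the lower half
        dst = (dst << 1) | (q & 1)   # quadrants 1 and 3 pick the right half
    return np.stack([src, dst], axis=1)

print(rmat_edges(scale=4, n_edges=8))
```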
## Setup
The following section lists the requirements you need to run the Synthetic Graph Generation tool.
### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Ampere Architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- Custom Docker containers built for this tool. Refer to the steps in the [Quick Start Guide](#quick-start-guide).
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
For those unable to set up the required environment or create their own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
### Getting Started
To use the tool, perform the following steps.
For the specifics concerning generation and training, refer to the [Advanced section](#advanced).
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
```
2. Go to the `SyntheticGraphGeneration` tool directory within the `DeepLearningExamples` repository:
```
cd DeepLearningExamples/Tools/DGLPyTorch/SyntheticGraphGeneration
```
3. Build the SyntheticGraphGeneration container.
```
bash docker_scripts/build_docker.sh
```
4. Download the datasets. (It is advisable to run this command inside the interactive Docker container to ensure the environment is set up; see step 6.1.)
```
bash scripts/get_datasets.sh
```
**Note**: This script requires a manual download of four datasets (tabformer, ieee, paysim, credit), which must be placed in the `./data` directory with the correct naming. Instructions for the manual download are printed during preprocessing. If the raw data is not present or the dataset is already preprocessed, the preprocessing is skipped.
5. Run the SyntheticGraphGeneration Jupyter notebook.
5.1. Run the Docker notebook container.
```
bash docker_scripts/run_docker_notebook.sh
```
5.2. Open the Jupyter notebook.
```
http://localhost:9916/tree/demos
```
6. Run the SyntheticGraphGeneration CLI.
6.1. Run the Docker interactive container.
```
bash docker_scripts/run_docker_interactive.sh
```
6.2. Run a Command Line Interface (CLI) command.
The tool contains three run commands: `preprocess`, `synthesize`, and `pretrain`.
For example, to synthesize a graph similar to the [IEEE](https://www.kaggle.com/c/ieee-fraud-detection) dataset, run the following commands:
1. Convert IEEE into the SynGen format:
```
syngen preprocess \
--dataset ieee \
--source-path /workspace/data/ieee-fraud/ \
--destination-path /workspace/data/ieee-preprocessed
```
**Note**: `--source-path` points to the location where the IEEE dataset is extracted,
and `--destination-path` points to the location where the IEEE dataset in SynGen format is saved.
2. Prepare SynGen configuration manually or using:
```
syngen mimic-dataset \
--dataset-path /workspace/data/ieee-preprocessed \
--output-file /workspace/configurations/my_ieee_config.json \
--tab-gen kde \
--edge-scale 1 \
--node-scale 1
```
**Note**: In the above commands, the `kde` tabular generator will be used to generate all tabular features.
3. Generate the synthetic IEEE dataset:
```
syngen synthesize \
--config-path /workspace/configurations/my_ieee_config.json \
--save-path /workspace/data/ieee-generated
```
**Note**: `--save-path` points to the location where the generated data in SynGen format is saved.
Following the above command, the `pretrain` command can be used to pre-train or fine-tune a model on the generated sample.
```
syngen pretrain \
--model gat_ec \
--hidden-dim 64 \
--out-dim 32 \
--n-layers 1 \
--n-heads 2 \
--weight-decay 0.0 \
--learning-rate 0.0005 \
--batch-size 256 \
--pretrain-epochs 5 \
--finetune-epochs 5 \
--data-path /workspace/data/ieee-preprocessed \
--edge-name user-product \
--pretraining-data-path /workspace/data/ieee-generated \
--pretraining-edge-name user-product \
--task ec \
--target-col isFraud \
--num-classes 2 \
--log-interval 1
```
**Note**: The current set of tasks and models is provided solely as a use-case example of how to use the generated synthetic data to pretrain/fine-tune on a downstream task, and generally would need extension/modification to accommodate very large graphs or arbitrary models.
For the complete CLI usage of the `synthesize` command, run:
```
syngen synthesize --help
```
Similarly, for the `pretrain`, `mimic-dataset`, and `preprocess` commands, run:
```
syngen <COMMAND> --help
```
## Advanced
### Repository structure
```
.
├── demos # Directory with all the Jupyter examples
├── docker_scripts # Directory with Docker scripts
├── scripts # Directory with datasets scripts
├── syngen # Directory with Synthetic Graph Generation source code
│ ├── analyzer # Directory with tools for getting graph visualisation and statistics
│ │ ├── graph # Directory with graph structure analyzer
│ │ └── tabular # Directory with tabular features analyzer
│ ├── benchmark # Directory with pretraining tools
│ │ ├── data_loader # Directory with pre-defined node and edge classification datasets
│ │ ├── models # Directory with GNN model definitions
│ │ └── tasks # Directory with set of tasks that are supported for training
│ ├── cli # Directory with all cli commands
│ ├── configuration # Directory with SynGen formats
│ ├── generator # Directory with all the generators
│ │ ├── graph # Directory with graph generators and graph
│ │ └── tabular # Directory with tabular generators
│ │ ├── data_transformer # Directory with tabular data transformations used by generators
│ │ └── transforms # Directory with tabular column transforms
│ ├── graph_aligner # Directory with all the aligners
│ ├── preprocessing # Directory with the preprocessings for the supported datasets
│ │ └── datasets # Directory with example dataset preprocessing scripts used to generate data
│ ├── synthesizer # Directory with all the synthesizers
│ └── utils # Directory with the utilities
│ └── types # Directory with common data types used in the tool
```
### Important scripts and files
* `scripts/get_datasets.sh` - Bash script downloading and preprocessing the supported datasets
* `docker_scripts/build_docker.sh` - Bash script that builds the Docker image
* `docker_scripts/run_docker_notebook.sh` - Bash script that runs Jupyter notebook in the Docker container
* `docker_scripts/run_docker_interactive.sh` - Bash script that runs the Docker container in interactive mode
* `syngen/synthesizer/configuration_graph_synthesizer.py` - Python file with graph synthesizer
### Parameters
For the synthesis process, refer to the parameters in the following table.
| Scope | parameter | Comment | Default Value |
|---------------|------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|
| preprocess | --dataset DATASET_NAME | Dataset to preprocess into SynGen format. Available datasets : [cora, epinions, ogbn_mag, ogbn_mag240m, ieee, tabformer] | Required |
| preprocess | -sp | --source-path SOURCE_PATH | Path to downloaded raw dataset | Required |
| preprocess | -dp | --destination-path DESTINATION_PATH | Path to store the preprocessed dataset in SynGen format. | SOURCE_PATH/syngen_preprocessed |
| preprocess | --cpu | Runs all operations on CPU | |
| preprocess | --use-cache | Does nothing if the target preprocessed dataset exists | |
| preprocess | --download | Downloads the dataset to the specified SOURCE_PATH | |
| mimic-dataset | -dp | --dataset-path DATASET_PATH | Path to the dataset in SynGen format | |
| mimic-dataset | -of | --output-file OUTPUT_FILE | Path to the generated SynGen Configuration | |
| mimic-dataset | -tg | --tab-gen TABULAR_GENERATOR | Tabular generator used to generate all tabular features (you can always modify OUTPUT_FILE). Available options: [kde, random, gaussian, uniform, ctgan] | kde |
| mimic-dataset | -rsg | --random-struct-gen | Generates a random structure based on the Erdos-Renyi model instead of mimicking | |
| mimic-dataset | -es | --edge-scale EDGE_SCALE | Multiplies the number of edges to generate by the provided number | |
| mimic-dataset | -en | --node-scale NODE_SCALE | Multiplies the number of nodes to generate by the provided number | |
| synthesize | -cp | --config-path CONFIG_PATH | Path to SynGen Configuration file that describes how to generate a graph | Required |
| synthesize | -sp | --save-path SAVE_PATH | Save path to dump generated files | Current directory |
| synthesize | --verbose | Displays generation process progress | |
| synthesize | --cpu | Runs all operations on CPU. [Attention] Alignment is not available on CPU | |
| synthesize | --timer-path FILE_PATH | Saves generation process timings to the specified file | Required |
For pretraining, refer to [Command-line options](#command-line-options), as the parameters depend on the model choice.
### Define the synthesizer pipeline
In this example, we show how to define the synthesizer pipeline for the [IEEE](https://www.kaggle.com/c/ieee-fraud-detection) dataset. A full example can be found in [ieee_notebook](./demos/advanced_examples/e2e_ieee_demo.ipynb).
#### Prepare data
- The preprocessing class is used to convert the IEEE dataset into the SynGen format.
```
preprocessing = IEEEPreprocessing(source_path='/workspace/data/ieee-fraud', destination_path='/workspace/data/ieee_preprocessed')
feature_spec = preprocessing.transform()
```
#### Prepare SynGen Configuration
- The SynGen Configuration is used to specify all generation details. We use the original dataset's feature spec as a base for the configuration.
```
feature_spec_for_config = feature_spec.copy()
```
- A tabular generator is used to generate tabular features.
```
feature_spec_for_config[MetaData.EDGES][0][MetaData.TABULAR_GENERATORS] = [
{
MetaData.TYPE: "kde",
MetaData.FEATURES_LIST: -1, # copies all tabular features from the original dataset
MetaData.DATA_SOURCE: {
MetaData.TYPE: "configuration",
MetaData.PATH: preprocessed_path,
MetaData.NAME: "user-product",
},
MetaData.PARAMS: {}
}
]
```
- A structure generator is used to generate the graph structure.
```
feature_spec_for_config[MetaData.EDGES][0][MetaData.STRUCTURE_GENERATOR] = {
MetaData.TYPE: "RMAT",
MetaData.DATA_SOURCE: {
MetaData.TYPE: "cfg", # the equivalent of 'configuration'
MetaData.PATH: preprocessed_path,
MetaData.NAME: "user-product",
},
MetaData.PARAMS: {
"seed": 42,
}
}
```
- After providing all related information, we create a `SynGenConfiguration` object. It fills out missing fields and validates provided data.
```
config = SynGenConfiguration(feature_spec_for_config)
```
#### Prepare synthesizer
- The synthesizer is a class that combines all the generators and allows the user to run end-to-end fitting and generation.
```
synthesizer = ConfigurationGraphSynthesizer(configuration=config, save_path='/workspace/data/ieee_generated')
```
- To start the fitting process, we use the `fit` method provided by the synthesizer. It automatically loads all required data from disk based on the information provided in the config.
```
synthesizer.fit()
```
#### Generate graph
- To run generation, we call the `generate` method provided by the synthesizer. We use `return_data=False` because we only want to store the generated data in the `/workspace/data/ieee_generated` folder. Otherwise, the call also returns tabular data under the `MetaData.FEATURES_DATA` key for each node and edge type, and structural data under the `MetaData.STRUCTURE_DATA` key for edges (see the sketch below).
```
out_feature_spec = synthesizer.generate(return_data=False)
```
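- With `return_data=True`, the generated arrays are instead attached to the returned feature spec. A sketch of reading them back, based on the keys described above (assuming the single edge type configured earlier):
```
out_feature_spec = synthesizer.generate(return_data=True)
edge_info = out_feature_spec[MetaData.EDGES][0]
structure = edge_info[MetaData.STRUCTURE_DATA]  # generated edge list
features = edge_info[MetaData.FEATURES_DATA]    # generated tabular features
```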
### Getting the data
To download the datasets used as examples, use the `get_datasets.sh` script:
```
bash scripts/get_datasets.sh
```
**Note**: Certain datasets require a Kaggle API key and hence may require a manual download. Refer to the links below.
**Note**: Each user is responsible for checking the content of the datasets and the applicable licenses, and determining if they are suitable for the intended use.
#### List of datasets
Supported datasets:
* [Twitch](https://snap.stanford.edu/data/twitch_gamers.html)
* [LastFM](https://snap.stanford.edu/data/feather-lastfm-social.html)
* [Orkut](https://snap.stanford.edu/data/com-Orkut.html)
* [Tabformer](https://github.com/IBM/TabFormer)
* [IEEE](https://www.kaggle.com/c/ieee-fraud-detection)
* [Paysim](https://www.kaggle.com/datasets/ealaxi/paysim1)
* [Credit](https://www.kaggle.com/datasets/kartik2112/fraud-detection)
* [CORA](https://relational.fit.cvut.cz/dataset/CORA)
* [Rating](http://www.trustlet.org/downloaded_epinions.html)
* [OGBN-MAG](https://ogb.stanford.edu/docs/nodeprop/#ogbn-mag)
* [MAG240M](https://ogb.stanford.edu/docs/lsc/mag240m/)
## Performance
Our results were obtained by running the demo notebooks from the [demos](./demos) directory in the PyTorch NGC container on an NVIDIA DGX-1 with 8x V100 32GB GPUs.
All the notebooks are presented in the table below.
| | scope | notebook | description |
|-----|-------------------|---------------------------------------|---------------------------------------------------------------------------------------------|
| 1. | basic_examples | e2e_cora_demo.ipynb | a complete process of generating a non-bipartite graph dataset with node features |
| 2. | basic_examples | e2e_ieee_demo.ipynb | a complete process of generating a bipartite graph dataset with edge features |
| 3.  | basic_examples    | e2e_epinions_demo.ipynb               | a complete process of generating a heterogeneous bipartite graph dataset with edge features |
| 4. | advanced_examples | big_graph_generation.ipynb | a complete process of mimicking and scaling the MAG240m dataset |
| 5. | performance | struct_generator.ipynb | comparison of SynGen graph structure generators |
| 6. | performance | tabular_generator.ipynb | comparison of SynGen tabular data generators |
Scope refers to the directories in which the notebooks are stored and the functionalities the notebooks cover. These are:
* Basic - [basic_examples](./demos/basic_examples) - notebooks with examples of basic functionalities
* Advanced - [advanced_examples](./demos/advanced_examples) - notebooks with examples of advanced functionalities
* Performance - [performance](./demos/performance) - notebooks with the performance experiments
To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Results
##### 1. Quality of the content of generated dataset vs. original dataset:
The content-quality comparison was conducted on the IEEE dataset (refer to [List of datasets](#list-of-datasets) for more details) with the corresponding notebook [e2e_ieee_demo.ipynb](./demos/advanced_examples/e2e_ieee_demo.ipynb).
We compared three aspects: the quality of the generated graph structure, the quality of the generated tabular data, and the quality of the alignment of tabular data to the graph structure.
* Graph structure quality
* Comparison of degree distribution for an original graph, properly generated and random (Erdős–Rényi) 
* Comparison of basic graph statistics for an original graph, properly generated and random (Erdős–Rényi) 
* Tabular data quality
* Comparison of two first components of a PCA of real and generated data 
* Comparison of basic statistics between real and generated data
| Generator  | KL divergence | correlation             |
|------------|---------------|-------------------------|
| GAN | 0.912 | 0.018 |
| Gaussian | 0.065 | -0.030 |
| Random | 0.617 | 0.026 |
* Structure to tabular alignment quality
* Degree centrality for feature distribution 
##### 2. Performance (speed) of the synthetic dataset generation:
* Performance of graph structure generation (edges/s)

* Performance of categorical tabular data generation (samples/s)
| Dataset (CPU/GPU) | KDE | Uniform | Gaussian | Random |
|-------------------|--------|---------|----------|---------|
| ieee (CPU) | 371296 | 897421 | 530683 | 440086 |
| ieee (GPU) | 592132 | 3621726 | 983408 | 6438646 |
##### 3. Synthetic dataset use-case specific quality factors:
* Performance (batches/s) comparison between original vs. synthetic datasets
| Dataset | Model | Synthetic | Original |
|---------|-------|-----------|----------|
| ieee | gat | 0.07173 | 0.07249 |
## Release notes
### Changelog
August 2023
- Heterogeneous graph generation
- Multi-GPU generation
January 2023
- Initial release
### Known issues
There are no known issues with this model.
## Reference
### Cite
Cite the following paper if you find this code useful or use it in your own work:
```
@article{darabi2022framework,
title={A Framework for Large Scale Synthetic Graph Dataset Generation},
author={Darabi, Sajad and Bigaj, Piotr and Majchrowski, Dawid and Morkisz, Pawel and Fit-Florea, Alex},
journal={arXiv preprint arXiv:2210.01944},
year={2022}
}
```
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/utils | utils | logging | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import time
import dllogger
import json
def init_logging(log_path, params_dict, enabled=True):
if not enabled:
return
json_backend = dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path)
stdout_backend = dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
stdout_backend._metadata['auc'].update({'format': '0:.6f'})
stdout_backend._metadata['validation_loss'].update({'format': '0:.6f'})
stdout_backend._metadata['throughput'].update({'format': ':.3e'})
stdout_backend._metadata['mean_step_time_ms'].update({'format': '0:.3f'})
stdout_backend._metadata['mean_inference_throughput'].update({'format': ':.3e'})
stdout_backend._metadata['mean_inference_latency'].update({'format': '0:.5f'})
for percentile in [90, 95, 99]:
stdout_backend._metadata[f'p{percentile}_inference_latency'].update({'format': '0:.5f'})
dllogger.init(backends=[json_backend, stdout_backend])
dllogger.log(data=params_dict, step='PARAMETER')
print("Command line flags:")
print(json.dumps(params_dict, indent=4))
class IterTimer:
def __init__(self, train_batch_size, test_batch_size, optimizer, print_freq=50,
enabled=True, benchmark_warmup_steps=None):
self.previous_tick = None
self.train_idx = 0
self.test_idx = 0
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.print_freq = print_freq
self.optimizer = optimizer
self.enabled = enabled
self.training_steps_time = 0
self.steps_measured = 0
if benchmark_warmup_steps is None:
self.benchmark_warmup_steps = print_freq * 2
else:
self.benchmark_warmup_steps = benchmark_warmup_steps
def step_train(self, loss=None):
if not self.enabled:
return
if self.train_idx < self.benchmark_warmup_steps:
self.train_idx += 1
return
if self.train_idx % self.print_freq == 0 and self.train_idx > 0:
if self.previous_tick is None:
self.previous_tick = time.time()
self.train_idx += 1
return
current_time = time.time()
elapsed = current_time - self.previous_tick
throughput = (self.train_batch_size * self.print_freq) / elapsed
throughput_in_millions = throughput / 1e6
step_time_ms = elapsed / self.print_freq * 1000
lr = f'{self.optimizer.lr.numpy().item():.4f}'
print(f'step={self.train_idx}, throughput={throughput_in_millions:.3f}M, step_time={step_time_ms:.3f} ms, learning_rate={lr}, loss={loss:.8f},')
self.previous_tick = current_time
self.training_steps_time += elapsed
self.steps_measured += self.print_freq
self.train_idx += 1
def mean_train_time(self):
if self.steps_measured == 0:
print("Run too short to measure mean training time")
return float('nan')
return self.training_steps_time / self.steps_measured
def step_test(self):
if not self.enabled:
return
if self.previous_tick is None:
self.previous_tick = time.time()
self.test_idx += 1
return
if self.test_idx % self.print_freq == self.print_freq - 1:
current_time = time.time()
elapsed = current_time - self.previous_tick
throughput = (self.test_batch_size * self.print_freq) / elapsed
throughput_in_millions = throughput / 1e6
step_time_ms = elapsed / self.print_freq * 1000
print(f'validation_step={self.test_idx}, validation_throughput={throughput_in_millions:.3f}M, step_time={step_time_ms:.3f} ms')
self.previous_tick = current_time
self.test_idx += 1
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc | preproc | dgx2_config | #!/bin/bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the environment variables to run the Spark job
# modify the environment variables below
# the numbers below should be adjusted according to the resources of your running environment
# set the total number of CPU cores that Spark can use
export TOTAL_CORES=80
# set the number of executors
export NUM_EXECUTORS=16
# the number of cores per executor; calculated from the values above
export NUM_EXECUTOR_CORES=$((${TOTAL_CORES}/${NUM_EXECUTORS}))
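# with the defaults above: 80 / 16 = 5 cores per executor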
# unit: GB, set the max memory you want to use
export TOTAL_MEMORY=800
# unit: GB, set the memory for driver
export DRIVER_MEMORY=32
# the memory per executor
export EXECUTOR_MEMORY=$(((${TOTAL_MEMORY}-${DRIVER_MEMORY})/${NUM_EXECUTORS}-16))
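# with the defaults above: (800 - 32) / 16 - 16 = 32 GB per executor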
|
DGLPyTorch/DrugDiscovery/SE3Transformer/scripts | scripts | predict | #!/usr/bin/env bash
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
# choices: 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv',
# 'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'
TASK=homo
python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \
se3_transformer.runtime.inference \
--amp "$AMP" \
--batch_size "$BATCH_SIZE" \
--use_layer_norm \
--norm \
--load_ckpt_path model_qm9.pth \
--task "$TASK"
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | finetune_vox_960h_cv | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# The model `Wav2Vec 2.0 Large (LV-60 + CV + SWBD + FSH)` fine-tuned on LS960
# has these changes wrt `wav2vec2_large_librivox.yaml`
: ${MAX_UPDATE:=80000}
: ${FREEZE_FINETUNE_UPDATES:=0}
: ${LEARNING_RATE:=0.00002}
: ${MASK_PROB:=0.25}
: ${MASK_CHANNEL_PROB:=0.5}
# Other changes (minor)
# --clip_norm=0 # =25
# --required_seq_len_multiple=1 # =2
bash scripts/finetune_vox_960h.sh
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics | metrics | oid_od_challenge_evaluation_utils_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_od_challenge_evaluation_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.metrics import oid_od_challenge_evaluation_utils as utils
class OidOdChallengeEvaluationUtilTest(tf.test.TestCase):
def testBuildGroundtruthDictionary(self):
np_data = pd.DataFrame(
[['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 1, None], [
'fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0, None
], ['fe58ec1b06db2bb7', '/m/04bcr3', None, None, None, None, None, 1], [
'fe58ec1b06db2bb7', '/m/083vt', None, None, None, None, None, 0
], ['fe58ec1b06db2bb7', '/m/02gy9n', None, None, None, None, None, 1]],
columns=[
'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'IsGroupOf',
'ConfidenceImageLabel'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
np_data, class_label_map)
self.assertTrue(standard_fields.InputDataFields.groundtruth_boxes in
groundtruth_dictionary)
self.assertTrue(standard_fields.InputDataFields.groundtruth_classes in
groundtruth_dictionary)
self.assertTrue(standard_fields.InputDataFields.groundtruth_group_of in
groundtruth_dictionary)
self.assertTrue(standard_fields.InputDataFields.groundtruth_image_classes in
groundtruth_dictionary)
self.assertAllEqual(
np.array([1, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
np.array([1, 0]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_group_of])
expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2]])
self.assertNDArrayNear(
expected_boxes_data, groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_boxes], 1e-5)
self.assertAllEqual(
np.array([1, 2, 3]), groundtruth_dictionary[
standard_fields.InputDataFields.groundtruth_image_classes])
def testBuildPredictionDictionary(self):
np_data = pd.DataFrame(
[['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.3, 0.5, 0.6, 0.1], [
'fe58ec1b06db2bb7', '/m/02gy9n', 0.1, 0.2, 0.3, 0.4, 0.2
], ['fe58ec1b06db2bb7', '/m/04bcr3', 0.0, 0.1, 0.2, 0.3, 0.3]],
columns=[
'ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax', 'Score'
])
class_label_map = {'/m/04bcr3': 1, '/m/083vt': 2, '/m/02gy9n': 3}
prediction_dictionary = utils.build_predictions_dictionary(
np_data, class_label_map)
self.assertTrue(standard_fields.DetectionResultFields.detection_boxes in
prediction_dictionary)
self.assertTrue(standard_fields.DetectionResultFields.detection_classes in
prediction_dictionary)
self.assertTrue(standard_fields.DetectionResultFields.detection_scores in
prediction_dictionary)
self.assertAllEqual(
np.array([1, 3, 1]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_classes])
expected_boxes_data = np.array([[0.5, 0.0, 0.6, 0.3], [0.3, 0.1, 0.4, 0.2],
[0.2, 0.0, 0.3, 0.1]])
self.assertNDArrayNear(
expected_boxes_data, prediction_dictionary[
standard_fields.DetectionResultFields.detection_boxes], 1e-5)
self.assertNDArrayNear(
np.array([0.1, 0.2, 0.3]), prediction_dictionary[
standard_fields.DetectionResultFields.detection_scores], 1e-5)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | run_latency_tests_cpu | export CUDA_VISIBLE_DEVICES=
export OMP_NUM_THREADS=6
export KMP_BLOCKTIME=0
export KMP_AFFINITY=granularity=fine,compact,1,0
bash test_infer.sh -bs 1 -il 128 --num-iters 1003 --tacotron2 tacotron2_1032590_6000_amp --waveglow waveglow_1076430_14000_amp --wn-channels 256 --cpu
bash test_infer.sh -bs 4 -il 128 --num-iters 1003 --tacotron2 tacotron2_1032590_6000_amp --waveglow waveglow_1076430_14000_amp --wn-channels 256 --cpu
|
PyTorch/Segmentation/MaskRCNN/pytorch | pytorch | test_fp32 | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#Script for PyT CI
#CONFIG: 1x8x12
RESULTS_DIR='maskrcnn_coco2017_test'
REPO_DIR='/opt/pytorch/examples/Detectron_PyT/pytorch'
CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml'
LOGFILE=$REPO_DIR/results/$RESULTS_DIR/log_gpu_0_fp32.log
mkdir -p $REPO_DIR/results/$RESULTS_DIR
GPU=8
BBOX_THRESHOLD=0.375
MASK_THRESHOLD=0.341
THROUGHPUT=1.9
THRESHOLD=0.9
cd $REPO_DIR
python -m torch.distributed.launch --nproc_per_node=$GPU tools/train_net.py \
--config-file $CONFIG \
SOLVER.BASE_LR 0.12 \
SOLVER.MAX_ITER 16667 \
SOLVER.STEPS "(12000, 16000)" \
SOLVER.IMS_PER_BATCH 96 \
TEST.IMS_PER_BATCH 8 \
DTYPE "float32" \
OUTPUT_DIR results/$RESULTS_DIR \
PATHS_CATALOG maskrcnn_benchmark/config/paths_catalog_ci.py \
2>&1 | tee $LOGFILE
map=`cat $LOGFILE | grep -F 'Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]' | tail -n 2 | awk -F' = ' '{print $2}' | egrep -o [0-9.]+`
bbox_map=`echo $map | awk -F' ' '{print $1}' | egrep -o [0-9.]+`
mask_map=`echo $map | awk -F' ' '{print $2}' | egrep -o [0-9.]+`
time=`cat $LOGFILE | grep -F 'maskrcnn_benchmark.trainer INFO: Total training time' | tail -n 1 | awk -F'(' '{print $2}' | awk -F' s ' '{print $1}' | egrep -o [0-9.]+`
throughput=$(echo $time 1.0 | awk '{ printf "%f", $2 / $1 }')
echo 'THRESHOLD:' $BBOX_THRESHOLD $MASK_THRESHOLD $THROUGHPUT
echo 'RESULT:' $map $throughput
ACCURACY_TEST_RESULT_BBOX=$(awk 'BEGIN {print ('${bbox_map}' >= '${BBOX_THRESHOLD}')}')
ACCURACY_TEST_RESULT_MASK=$(awk 'BEGIN {print ('${mask_map}' >= '${MASK_THRESHOLD}')}')
if [ $ACCURACY_TEST_RESULT_BBOX == 1 -a $ACCURACY_TEST_RESULT_MASK == 1 ];
then
echo "&&&& ACCURACY TEST PASSED"
else
echo "&&&& ACCURACY TEST FAILED"
fi
PERFORMANCE_TEST_RESULT=$(awk 'BEGIN {print ('${throughput}' >= \
('${THROUGHPUT}' * '${THRESHOLD}'))}')
if [ $PERFORMANCE_TEST_RESULT == 1 ];
then
echo "&&&& PERFORMANCE TEST PASSED"
else
echo "&&&& PERFORMANCE TEST FAILED"
fi
if [ $ACCURACY_TEST_RESULT_BBOX == 1 -a $ACCURACY_TEST_RESULT_MASK == 1 -a $PERFORMANCE_TEST_RESULT == 1 ];
then
echo "&&&& PASSED"
exit 0
else
echo "&&&& FAILED"
exit 1
fi
|
Tools/DGLPyTorch/SyntheticGraphGeneration | SyntheticGraphGeneration | requirements | snap-stanford==6.0.0
similaritymeasures==0.6.0
seaborn==0.12.2
ipywidgets==8.0.4
ipython_autotime==0.3.1
scikit-plot>=0.3.7
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | fgvc_2854_classes_label_map | item {
name: "147457"
id: 1
display_name: "Nicrophorus tomentosus"
}
item {
name: "81923"
id: 2
display_name: "Halyomorpha halys"
}
item {
name: "7"
id: 3
display_name: "Aramus guarauna"
}
item {
name: "201041"
id: 4
display_name: "Rupornis magnirostris"
}
item {
name: "65551"
id: 5
display_name: "Hyla eximia"
}
item {
name: "106516"
id: 6
display_name: "Nannothemis bella"
}
item {
name: "154287"
id: 7
display_name: "Acalymma vittatum"
}
item {
name: "32798"
id: 8
display_name: "Ramphotyphlops braminus"
}
item {
name: "8229"
id: 9
display_name: "Cyanocitta cristata"
}
item {
name: "73766"
id: 10
display_name: "Drymarchon melanurus"
}
item {
name: "409639"
id: 11
display_name: "Aenetus virescens"
}
item {
name: "8234"
id: 12
display_name: "Cyanocitta stelleri"
}
item {
name: "228593"
id: 13
display_name: "Polygrammate hebraeicum"
}
item {
name: "53"
id: 14
display_name: "Balearica regulorum"
}
item {
name: "57399"
id: 15
display_name: "Fistularia commersonii"
}
item {
name: "81979"
id: 16
display_name: "Syritta pipiens"
}
item {
name: "73788"
id: 17
display_name: "Plestiodon fasciatus"
}
item {
name: "73790"
id: 18
display_name: "Plestiodon inexpectatus"
}
item {
name: "16447"
id: 19
display_name: "Pyrocephalus rubinus"
}
item {
name: "73792"
id: 20
display_name: "Plestiodon laticeps"
}
item {
name: "49219"
id: 21
display_name: "Anguilla rostrata"
}
item {
name: "73797"
id: 22
display_name: "Plestiodon obsoletus"
}
item {
name: "73803"
id: 23
display_name: "Plestiodon tetragrammus"
}
item {
name: "122956"
id: 24
display_name: "Syntomoides imaon"
}
item {
name: "82003"
id: 25
display_name: "Arion ater"
}
item {
name: "32854"
id: 26
display_name: "Chamaeleo dilepis"
}
item {
name: "42341"
id: 27
display_name: "Tragelaphus scriptus"
}
item {
name: "82018"
id: 28
display_name: "Taeniopoda eques"
}
item {
name: "57443"
id: 29
display_name: "Libellula quadrimaculata"
}
item {
name: "4885"
id: 30
display_name: "Recurvirostra americana"
}
item {
name: "178403"
id: 31
display_name: "Phalaenophana pyramusalis"
}
item {
name: "135027"
id: 32
display_name: "Agalychnis dacnicolor"
}
item {
name: "49262"
id: 33
display_name: "Haemulon sciurus"
}
item {
name: "98417"
id: 34
display_name: "Cordulegaster diastatops"
}
item {
name: "57458"
id: 35
display_name: "Ladona julia"
}
item {
name: "115"
id: 36
display_name: "Ardeotis kori"
}
item {
name: "49269"
id: 37
display_name: "Diodon holocanthus"
}
item {
name: "57463"
id: 38
display_name: "Papilio canadensis"
}
item {
name: "82043"
id: 39
display_name: "Monochamus scutellatus"
}
item {
name: "147580"
id: 40
display_name: "Ceratotherium simum simum"
}
item {
name: "98430"
id: 41
display_name: "Cordulia shurtleffii"
}
item {
name: "8319"
id: 42
display_name: "Pica nuttalli"
}
item {
name: "43712"
id: 43
display_name: "Dasyprocta punctata"
}
item {
name: "8335"
id: 44
display_name: "Perisoreus canadensis"
}
item {
name: "508048"
id: 45
display_name: "Antigone canadensis"
}
item {
name: "49297"
id: 46
display_name: "Aetobatus narinari"
}
item {
name: "82069"
id: 47
display_name: "Phyciodes pulchella"
}
item {
name: "73149"
id: 48
display_name: "Parkesia noveboracensis"
}
item {
name: "180379"
id: 49
display_name: "Ardea herodias occidentalis"
}
item {
name: "73884"
id: 50
display_name: "Pantherophis emoryi"
}
item {
name: "106653"
id: 51
display_name: "Nehalennia irene"
}
item {
name: "73887"
id: 52
display_name: "Pantherophis guttatus"
}
item {
name: "73888"
id: 53
display_name: "Pantherophis obsoletus"
}
item {
name: "162"
id: 54
display_name: "Porzana carolina"
}
item {
name: "245925"
id: 55
display_name: "Siproeta stelenes biplagiata"
}
item {
name: "117302"
id: 56
display_name: "Physalia physalis"
}
item {
name: "57516"
id: 57
display_name: "Bombus terrestris"
}
item {
name: "204995"
id: 58
display_name: "Anas platyrhynchos diazi"
}
item {
name: "49348"
id: 59
display_name: "Hyles lineata"
}
item {
name: "82117"
id: 60
display_name: "Dolomedes tenebrosus"
}
item {
name: "114891"
id: 61
display_name: "Varanus salvator"
}
item {
name: "319695"
id: 62
display_name: "Epilachna mexicana"
}
item {
name: "41168"
id: 63
display_name: "Desmodus rotundus"
}
item {
name: "13688"
id: 64
display_name: "Motacilla cinerea"
}
item {
name: "57556"
id: 65
display_name: "Papio ursinus"
}
item {
name: "16598"
id: 66
display_name: "Empidonax difficilis"
}
item {
name: "16602"
id: 67
display_name: "Empidonax minimus"
}
item {
name: "16604"
id: 68
display_name: "Empidonax fulvifrons"
}
item {
name: "409181"
id: 69
display_name: "Trite planiceps"
}
item {
name: "82144"
id: 70
display_name: "Hemileuca eglanterina"
}
item {
name: "16611"
id: 71
display_name: "Empidonax traillii"
}
item {
name: "82153"
id: 72
display_name: "Ceratomia undulosa"
}
item {
name: "82155"
id: 73
display_name: "Bittacomorpha clavipes"
}
item {
name: "205036"
id: 74
display_name: "Xanthorhoe lacustrata"
}
item {
name: "16624"
id: 75
display_name: "Empidonax hammondii"
}
item {
name: "16625"
id: 76
display_name: "Empidonax occidentalis"
}
item {
name: "243"
id: 77
display_name: "Rallus limicola"
}
item {
name: "41"
id: 78
display_name: "Grus grus"
}
item {
name: "49402"
id: 79
display_name: "Abudefduf saxatilis"
}
item {
name: "58550"
id: 80
display_name: "Callophrys niphon"
}
item {
name: "205055"
id: 81
display_name: "Zopherus nodulosus haldemani"
}
item {
name: "82177"
id: 82
display_name: "Hermetia illucens"
}
item {
name: "9601"
id: 83
display_name: "Quiscalus major"
}
item {
name: "7101"
id: 84
display_name: "Branta leucopsis"
}
item {
name: "8470"
id: 85
display_name: "Cyanocorax yucatanicus"
}
item {
name: "74009"
id: 86
display_name: "Zamenis longissimus"
}
item {
name: "8474"
id: 87
display_name: "Cyanocorax yncas"
}
item {
name: "82204"
id: 88
display_name: "Nadata gibbosa"
}
item {
name: "123168"
id: 89
display_name: "Ensatina eschscholtzii xanthoptica"
}
item {
name: "82210"
id: 90
display_name: "Heterocampa biundata"
}
item {
name: "48284"
id: 91
display_name: "Oniscus asellus"
}
item {
name: "4146"
id: 92
display_name: "Oceanites oceanicus"
}
item {
name: "82225"
id: 93
display_name: "Lophocampa caryae"
}
item {
name: "9609"
id: 94
display_name: "Quiscalus niger"
}
item {
name: "65849"
id: 95
display_name: "Incilius nebulifer"
}
item {
name: "207583"
id: 96
display_name: "Miomantis caffra"
}
item {
name: "491839"
id: 97
display_name: "Pyrausta insequalis"
}
item {
name: "74048"
id: 98
display_name: "Alces americanus"
}
item {
name: "57665"
id: 99
display_name: "Cotinis mutabilis"
}
item {
name: "65860"
id: 100
display_name: "Incilius valliceps"
}
item {
name: "52911"
id: 101
display_name: "Dolichovespula maculata"
}
item {
name: "8524"
id: 102
display_name: "Psilorhinus morio"
}
item {
name: "49491"
id: 103
display_name: "Thalassoma bifasciatum"
}
item {
name: "41301"
id: 104
display_name: "Tadarida brasiliensis"
}
item {
name: "57687"
id: 105
display_name: "Xylocopa varipuncta"
}
item {
name: "57689"
id: 106
display_name: "Bombus vosnesenskii"
}
item {
name: "57690"
id: 107
display_name: "Bombus sonorus"
}
item {
name: "33118"
id: 108
display_name: "Basiliscus vittatus"
}
item {
name: "205151"
id: 109
display_name: "Phlogophora meticulosa"
}
item {
name: "49504"
id: 110
display_name: "Callinectes sapidus"
}
item {
name: "16737"
id: 111
display_name: "Megarynchus pitangua"
}
item {
name: "357"
id: 112
display_name: "Gallinula tenebrosa"
}
item {
name: "82278"
id: 113
display_name: "Ameiurus melas"
}
item {
name: "82279"
id: 114
display_name: "Automeris io"
}
item {
name: "505478"
id: 115
display_name: "Gallus gallus domesticus"
}
item {
name: "33135"
id: 116
display_name: "Crotaphytus collaris"
}
item {
name: "41328"
id: 117
display_name: "Lavia frons"
}
item {
name: "196979"
id: 118
display_name: "Anaxyrus boreas halophilus"
}
item {
name: "44902"
id: 119
display_name: "Sigmodon hispidus"
}
item {
name: "1428"
id: 120
display_name: "Numida meleagris"
}
item {
name: "119153"
id: 121
display_name: "Junco hyemalis caniceps"
}
item {
name: "49539"
id: 122
display_name: "Pisaster brevispinus"
}
item {
name: "328068"
id: 123
display_name: "Belocaulus angustipes"
}
item {
name: "120214"
id: 124
display_name: "Clostera albosigma"
}
item {
name: "16779"
id: 125
display_name: "Tyrannus vociferans"
}
item {
name: "16782"
id: 126
display_name: "Tyrannus tyrannus"
}
item {
name: "16783"
id: 127
display_name: "Tyrannus forficatus"
}
item {
name: "16784"
id: 128
display_name: "Tyrannus crassirostris"
}
item {
name: "57745"
id: 129
display_name: "Linckia laevigata"
}
item {
name: "205202"
id: 130
display_name: "Ecliptopera silaceata"
}
item {
name: "205203"
id: 131
display_name: "Dyspteris abortivaria"
}
item {
name: "16791"
id: 132
display_name: "Tyrannus verticalis"
}
item {
name: "16793"
id: 133
display_name: "Tyrannus savana"
}
item {
name: "205213"
id: 134
display_name: "Caripeta divisata"
}
item {
name: "49566"
id: 135
display_name: "Cicindela sexguttata"
}
item {
name: "491935"
id: 136
display_name: "Thylacodes squamigerus"
}
item {
name: "205216"
id: 137
display_name: "Cerma cerintha"
}
item {
name: "39665"
id: 138
display_name: "Caretta caretta"
}
item {
name: "147881"
id: 139
display_name: "Trichechus manatus latirostris"
}
item {
name: "28743"
id: 140
display_name: "Salvadora hexalepis"
}
item {
name: "205231"
id: 141
display_name: "Idaea dimidiata"
}
item {
name: "205233"
id: 142
display_name: "Iridopsis larvaria"
}
item {
name: "205235"
id: 143
display_name: "Leuconycta diphteroides"
}
item {
name: "436"
id: 144
display_name: "Gallirallus australis"
}
item {
name: "205238"
id: 145
display_name: "Metanema inatomaria"
}
item {
name: "49591"
id: 146
display_name: "Lepomis macrochirus"
}
item {
name: "229817"
id: 147
display_name: "Raphia frater"
}
item {
name: "49594"
id: 148
display_name: "Pomoxis nigromaculatus"
}
item {
name: "65979"
id: 149
display_name: "Lithobates catesbeianus"
}
item {
name: "49596"
id: 150
display_name: "Salvelinus fontinalis"
}
item {
name: "65982"
id: 151
display_name: "Lithobates clamitans"
}
item {
name: "8649"
id: 152
display_name: "Calocitta formosa"
}
item {
name: "8650"
id: 153
display_name: "Calocitta colliei"
}
item {
name: "82379"
id: 154
display_name: "Hemaris thysbe"
}
item {
name: "49614"
id: 155
display_name: "Lepomis gibbosus"
}
item {
name: "63028"
id: 156
display_name: "Hypercompe scribonia"
}
item {
name: "39672"
id: 157
display_name: "Eretmochelys imbricata"
}
item {
name: "66003"
id: 158
display_name: "Lithobates pipiens"
}
item {
name: "197077"
id: 159
display_name: "Vanessa kershawi"
}
item {
name: "473"
id: 160
display_name: "Fulica americana"
}
item {
name: "147930"
id: 161
display_name: "Rabidosa rabida"
}
item {
name: "147931"
id: 162
display_name: "Panoquina ocola"
}
item {
name: "66012"
id: 163
display_name: "Lithobates sylvaticus"
}
item {
name: "8671"
id: 164
display_name: "Pachyramphus aglaiae"
}
item {
name: "41440"
id: 165
display_name: "Phocoena phocoena"
}
item {
name: "27388"
id: 166
display_name: "Carphophis amoenus"
}
item {
name: "82418"
id: 167
display_name: "Cicindela punctulata"
}
item {
name: "25078"
id: 168
display_name: "Gastrophryne carolinensis"
}
item {
name: "82425"
id: 169
display_name: "Cicindela repanda"
}
item {
name: "143446"
id: 170
display_name: "Paonias myops"
}
item {
name: "41478"
id: 171
display_name: "Eschrichtius robustus"
}
item {
name: "5200"
id: 172
display_name: "Buteo lagopus"
}
item {
name: "148908"
id: 173
display_name: "Chrysodeixis includens"
}
item {
name: "41482"
id: 174
display_name: "Tursiops truncatus"
}
item {
name: "6914"
id: 175
display_name: "Cygnus atratus"
}
item {
name: "464301"
id: 176
display_name: "Philesturnus rufusater"
}
item {
name: "129226"
id: 177
display_name: "Chytolita morbidalis"
}
item {
name: "180759"
id: 178
display_name: "Aphonopelma iodius"
}
item {
name: "135318"
id: 179
display_name: "Apantesis phalerata"
}
item {
name: "49699"
id: 180
display_name: "Pisaster ochraceus"
}
item {
name: "49700"
id: 181
display_name: "Coluber lateralis lateralis"
}
item {
name: "61532"
id: 182
display_name: "Propylea quatuordecimpunctata"
}
item {
name: "4368"
id: 183
display_name: "Larus marinus"
}
item {
name: "41521"
id: 184
display_name: "Orcinus orca"
}
item {
name: "49716"
id: 185
display_name: "Paonias excaecata"
}
item {
name: "41526"
id: 186
display_name: "Delphinus delphis"
}
item {
name: "49723"
id: 187
display_name: "Pugettia producta"
}
item {
name: "16956"
id: 188
display_name: "Pitangus sulphuratus"
}
item {
name: "210607"
id: 189
display_name: "Diastictis fracturalis"
}
item {
name: "148030"
id: 190
display_name: "Equus asinus"
}
item {
name: "6924"
id: 191
display_name: "Anas rubripes"
}
item {
name: "30844"
id: 192
display_name: "Bothriechis schlegelii"
}
item {
name: "123628"
id: 193
display_name: "Argynnis paphia"
}
item {
name: "131676"
id: 194
display_name: "Anthus novaeseelandiae novaeseelandiae"
}
item {
name: "41566"
id: 195
display_name: "Megaptera novaeangliae"
}
item {
name: "49759"
id: 196
display_name: "Pyrgus oileus"
}
item {
name: "49761"
id: 197
display_name: "Anartia jatrophae"
}
item {
name: "49766"
id: 198
display_name: "Heliconius charithonia"
}
item {
name: "33383"
id: 199
display_name: "Coleonyx brevis"
}
item {
name: "33384"
id: 200
display_name: "Coleonyx elegans"
}
item {
name: "312764"
id: 201
display_name: "Euptoieta hegesia meridiania"
}
item {
name: "82538"
id: 202
display_name: "Vanessa gonerilla"
}
item {
name: "33387"
id: 203
display_name: "Coleonyx variegatus"
}
item {
name: "56082"
id: 204
display_name: "Aeshna canadensis"
}
item {
name: "17008"
id: 205
display_name: "Sayornis phoebe"
}
item {
name: "200808"
id: 206
display_name: "Sceloporus graciosus vandenburgianus"
}
item {
name: "17013"
id: 207
display_name: "Sayornis nigricans"
}
item {
name: "122381"
id: 208
display_name: "Cupido comyntas"
}
item {
name: "123516"
id: 209
display_name: "Mydas clavatus"
}
item {
name: "8834"
id: 210
display_name: "Tityra semifasciata"
}
item {
name: "146199"
id: 211
display_name: "Lampropeltis californiae"
}
item {
name: "17858"
id: 212
display_name: "Dryocopus lineatus"
}
item {
name: "334616"
id: 213
display_name: "Battus philenor hirsuta"
}
item {
name: "82582"
id: 214
display_name: "Labidomera clivicollis"
}
item {
name: "204699"
id: 215
display_name: "Pseudothyatira cymatophoroides"
}
item {
name: "41638"
id: 216
display_name: "Ursus americanus"
}
item {
name: "27420"
id: 217
display_name: "Desmognathus fuscus"
}
item {
name: "81584"
id: 218
display_name: "Anisota virginiensis"
}
item {
name: "49848"
id: 219
display_name: "Navanax inermis"
}
item {
name: "143476"
id: 220
display_name: "Calledapteryx dryopterata"
}
item {
name: "41663"
id: 221
display_name: "Procyon lotor"
}
item {
name: "49857"
id: 222
display_name: "Aplysia vaccaria"
}
item {
name: "41673"
id: 223
display_name: "Nasua narica"
}
item {
name: "41676"
id: 224
display_name: "Bassariscus astutus"
}
item {
name: "27427"
id: 225
display_name: "Aneides lugubris"
}
item {
name: "418530"
id: 226
display_name: "Porphyrio melanotus"
}
item {
name: "311419"
id: 227
display_name: "Neobernaya spadicea"
}
item {
name: "113502"
id: 228
display_name: "Sympetrum costiferum"
}
item {
name: "66278"
id: 229
display_name: "Oophaga pumilio"
}
item {
name: "6951"
id: 230
display_name: "Anas bahamensis"
}
item {
name: "213740"
id: 231
display_name: "Antaeotricha schlaegeri"
}
item {
name: "143485"
id: 232
display_name: "Xanthorhoe ferrugata"
}
item {
name: "120275"
id: 233
display_name: "Euphyia intermediata"
}
item {
name: "48035"
id: 234
display_name: "Strongylocentrotus purpuratus"
}
item {
name: "41728"
id: 235
display_name: "Mirounga angustirostris"
}
item {
name: "41733"
id: 236
display_name: "Halichoerus grypus"
}
item {
name: "41740"
id: 237
display_name: "Zalophus californianus"
}
item {
name: "118914"
id: 238
display_name: "Echinargus isola"
}
item {
name: "4936"
id: 239
display_name: "Egretta novaehollandiae"
}
item {
name: "131862"
id: 240
display_name: "Typocerus velutinus"
}
item {
name: "55401"
id: 241
display_name: "Pieris brassicae"
}
item {
name: "41752"
id: 242
display_name: "Arctocephalus forsteri"
}
item {
name: "41755"
id: 243
display_name: "Eumetopias jubatus"
}
item {
name: "123676"
id: 244
display_name: "Anas crecca carolinensis"
}
item {
name: "41763"
id: 245
display_name: "Phocarctos hookeri"
}
item {
name: "181034"
id: 246
display_name: "Cervus elaphus canadensis"
}
item {
name: "49964"
id: 247
display_name: "Ginglymostoma cirratum"
}
item {
name: "213809"
id: 248
display_name: "Anticarsia gemmatalis"
}
item {
name: "49972"
id: 249
display_name: "Battus philenor"
}
item {
name: "205623"
id: 250
display_name: "Microstylum morosum"
}
item {
name: "336697"
id: 251
display_name: "Arctia villica"
}
item {
name: "41789"
id: 252
display_name: "Taxidea taxus"
}
item {
name: "48724"
id: 253
display_name: "Phidiana hiltoni"
}
item {
name: "123713"
id: 254
display_name: "Neoscona oaxacensis"
}
item {
name: "33602"
id: 255
display_name: "Tarentola mauritanica"
}
item {
name: "846"
id: 256
display_name: "Alectoris chukar"
}
item {
name: "41808"
id: 257
display_name: "Mustela erminea"
}
item {
name: "50001"
id: 258
display_name: "Terrapene carolina carolina"
}
item {
name: "41810"
id: 259
display_name: "Mustela frenata"
}
item {
name: "82774"
id: 260
display_name: "Oryctes nasicornis"
}
item {
name: "41815"
id: 261
display_name: "Mustela nivalis"
}
item {
name: "4239"
id: 262
display_name: "Tachybaptus dominicus"
}
item {
name: "344926"
id: 263
display_name: "Artemisiospiza belli"
}
item {
name: "82792"
id: 264
display_name: "Celastrina neglecta"
}
item {
name: "41841"
id: 265
display_name: "Meles meles"
}
item {
name: "882"
id: 266
display_name: "Gallus gallus"
}
item {
name: "125758"
id: 267
display_name: "Mercenaria mercenaria"
}
item {
name: "9081"
id: 268
display_name: "Cardinalis sinuatus"
}
item {
name: "9083"
id: 269
display_name: "Cardinalis cardinalis"
}
item {
name: "9092"
id: 270
display_name: "Melospiza lincolnii"
}
item {
name: "4246"
id: 271
display_name: "Podilymbus podiceps"
}
item {
name: "9096"
id: 272
display_name: "Melospiza georgiana"
}
item {
name: "906"
id: 273
display_name: "Meleagris gallopavo"
}
item {
name: "50059"
id: 274
display_name: "Limacia cockerelli"
}
item {
name: "394124"
id: 275
display_name: "Orthodera novaezealandiae"
}
item {
name: "82832"
id: 276
display_name: "Cosmopepla lintneriana"
}
item {
name: "913"
id: 277
display_name: "Meleagris ocellata"
}
item {
name: "41877"
id: 278
display_name: "Conepatus leuconotus"
}
item {
name: "196419"
id: 279
display_name: "Euborellia annulipes"
}
item {
name: "50071"
id: 280
display_name: "Erynnis horatius"
}
item {
name: "41880"
id: 281
display_name: "Mephitis mephitis"
}
item {
name: "50073"
id: 282
display_name: "Dryas iulia"
}
item {
name: "173793"
id: 283
display_name: "Diphthera festiva"
}
item {
name: "41886"
id: 284
display_name: "Crocuta crocuta"
}
item {
name: "30683"
id: 285
display_name: "Agkistrodon contortrix contortrix"
}
item {
name: "931"
id: 286
display_name: "Lagopus lagopus"
}
item {
name: "41901"
id: 287
display_name: "Herpestes javanicus"
}
item {
name: "143517"
id: 288
display_name: "Biston betularia"
}
item {
name: "9139"
id: 289
display_name: "Spizella atrogularis"
}
item {
name: "8350"
id: 290
display_name: "Pyrrhocorax graculus"
}
item {
name: "9144"
id: 291
display_name: "Spizella breweri"
}
item {
name: "12936"
id: 292
display_name: "Sialia currucoides"
}
item {
name: "9152"
id: 293
display_name: "Spizella pusilla"
}
item {
name: "68229"
id: 294
display_name: "Tramea carolina"
}
item {
name: "6987"
id: 295
display_name: "Anas superciliosa"
}
item {
name: "9156"
id: 296
display_name: "Passerella iliaca"
}
item {
name: "202315"
id: 297
display_name: "Romaleon antennarium"
}
item {
name: "4257"
id: 298
display_name: "Phoenicopterus ruber"
}
item {
name: "25545"
id: 299
display_name: "Rana aurora"
}
item {
name: "15282"
id: 300
display_name: "Sylvia atricapilla"
}
item {
name: "103927"
id: 301
display_name: "Ladona deplanata"
}
item {
name: "17356"
id: 302
display_name: "Vireo bellii"
}
item {
name: "26765"
id: 303
display_name: "Ambystoma mavortium"
}
item {
name: "205777"
id: 304
display_name: "Plectrodera scalator"
}
item {
name: "17362"
id: 305
display_name: "Vireo plumbeus"
}
item {
name: "99283"
id: 306
display_name: "Didymops transversa"
}
item {
name: "17364"
id: 307
display_name: "Vireo philadelphicus"
}
item {
name: "17365"
id: 308
display_name: "Vireo flavifrons"
}
item {
name: "17366"
id: 309
display_name: "Vireo olivaceus"
}
item {
name: "9182"
id: 310
display_name: "Zonotrichia querula"
}
item {
name: "17375"
id: 311
display_name: "Vireo huttoni"
}
item {
name: "9184"
id: 312
display_name: "Zonotrichia albicollis"
}
item {
name: "9185"
id: 313
display_name: "Zonotrichia atricapilla"
}
item {
name: "50147"
id: 314
display_name: "Celithemis eponina"
}
item {
name: "47585"
id: 315
display_name: "Crassostrea virginica"
}
item {
name: "9195"
id: 316
display_name: "Emberiza citrinella"
}
item {
name: "41964"
id: 317
display_name: "Panthera leo"
}
item {
name: "6994"
id: 318
display_name: "Bucephala islandica"
}
item {
name: "52506"
id: 319
display_name: "Adalia bipunctata"
}
item {
name: "9201"
id: 320
display_name: "Emberiza schoeniclus"
}
item {
name: "17394"
id: 321
display_name: "Vireo gilvus"
}
item {
name: "25591"
id: 322
display_name: "Rana temporaria"
}
item {
name: "41976"
id: 323
display_name: "Lynx rufus"
}
item {
name: "214015"
id: 324
display_name: "Apoda y-inversum"
}
item {
name: "50176"
id: 325
display_name: "Enallagma vesperum"
}
item {
name: "99331"
id: 326
display_name: "Diplacodes trivialis"
}
item {
name: "50181"
id: 327
display_name: "Loxosceles reclusa"
}
item {
name: "74758"
id: 328
display_name: "Neovison vison"
}
item {
name: "123912"
id: 329
display_name: "Charaxes jasius"
}
item {
name: "41997"
id: 330
display_name: "Leopardus pardalis"
}
item {
name: "123920"
id: 331
display_name: "Dorcus parallelipipedus"
}
item {
name: "132334"
id: 332
display_name: "Urbanus procne"
}
item {
name: "123922"
id: 333
display_name: "Abudefduf sordidus"
}
item {
name: "9236"
id: 334
display_name: "Serinus serinus"
}
item {
name: "42007"
id: 335
display_name: "Puma concolor"
}
item {
name: "9240"
id: 336
display_name: "Serinus mozambicus"
}
item {
name: "148506"
id: 337
display_name: "Melanis pixe"
}
item {
name: "58399"
id: 338
display_name: "Urosalpinx cinerea"
}
item {
name: "312353"
id: 339
display_name: "Leptophobia aripa elodia"
}
item {
name: "148517"
id: 340
display_name: "Heliopetes laviana"
}
item {
name: "73905"
id: 341
display_name: "Phrynosoma cornutum"
}
item {
name: "39772"
id: 342
display_name: "Chrysemys picta marginata"
}
item {
name: "25646"
id: 343
display_name: "Rana boylii"
}
item {
name: "62984"
id: 344
display_name: "Aedes albopictus"
}
item {
name: "123959"
id: 345
display_name: "Ensatina eschscholtzii oregonensis"
}
item {
name: "1081"
id: 346
display_name: "Lophura leucomelanos"
}
item {
name: "39775"
id: 347
display_name: "Chrysemys picta picta"
}
item {
name: "42046"
id: 348
display_name: "Canis mesomelas"
}
item {
name: "42048"
id: 349
display_name: "Canis lupus"
}
item {
name: "42051"
id: 350
display_name: "Canis latrans"
}
item {
name: "9284"
id: 351
display_name: "Euphonia elegantissima"
}
item {
name: "25669"
id: 352
display_name: "Rana dalmatina"
}
item {
name: "9287"
id: 353
display_name: "Euphonia hirundinacea"
}
item {
name: "9291"
id: 354
display_name: "Euphonia affinis"
}
item {
name: "222284"
id: 355
display_name: "Iridopsis defectaria"
}
item {
name: "74832"
id: 356
display_name: "Papio anubis"
}
item {
name: "148563"
id: 357
display_name: "Myscelia ethusa"
}
item {
name: "42069"
id: 358
display_name: "Vulpes vulpes"
}
item {
name: "9743"
id: 359
display_name: "Agelaius tricolor"
}
item {
name: "42076"
id: 360
display_name: "Urocyon cinereoargenteus"
}
item {
name: "509025"
id: 361
display_name: "Momotus lessonii"
}
item {
name: "17506"
id: 362
display_name: "Zosterops japonicus"
}
item {
name: "4283"
id: 363
display_name: "Phalacrocorax pelagicus"
}
item {
name: "58469"
id: 364
display_name: "Thorybes pylades"
}
item {
name: "9319"
id: 365
display_name: "Icterus cucullatus"
}
item {
name: "58473"
id: 366
display_name: "Erynnis icelus"
}
item {
name: "58475"
id: 367
display_name: "Erynnis juvenalis"
}
item {
name: "42093"
id: 368
display_name: "Lycaon pictus"
}
item {
name: "58478"
id: 369
display_name: "Erynnis baptisiae"
}
item {
name: "9328"
id: 370
display_name: "Icterus graduacauda"
}
item {
name: "58481"
id: 371
display_name: "Ancyloxypha numitor"
}
item {
name: "132210"
id: 372
display_name: "Deloyala guttata"
}
item {
name: "58484"
id: 373
display_name: "Thymelicus lineola"
}
item {
name: "13701"
id: 374
display_name: "Motacilla aguimp"
}
item {
name: "410743"
id: 375
display_name: "Anas superciliosa \303\227 platyrhynchos"
}
item {
name: "9336"
id: 376
display_name: "Icterus pustulatus"
}
item {
name: "9339"
id: 377
display_name: "Icterus gularis"
}
item {
name: "124031"
id: 378
display_name: "Agrius convolvuli"
}
item {
name: "42113"
id: 379
display_name: "Pecari tajacu"
}
item {
name: "132227"
id: 380
display_name: "Lethe appalachia"
}
item {
name: "113516"
id: 381
display_name: "Sympetrum madidum"
}
item {
name: "58509"
id: 382
display_name: "Anatrytone logan"
}
item {
name: "83086"
id: 383
display_name: "Eurytides marcellus"
}
item {
name: "58511"
id: 384
display_name: "Poanes viator"
}
item {
name: "83090"
id: 385
display_name: "Epimecis hortaria"
}
item {
name: "115859"
id: 386
display_name: "Micrurus tener tener"
}
item {
name: "129902"
id: 387
display_name: "Camponotus pennsylvanicus"
}
item {
name: "42134"
id: 388
display_name: "Sus scrofa"
}
item {
name: "58519"
id: 389
display_name: "Pompeius verna"
}
item {
name: "205977"
id: 390
display_name: "Coccinella undecimpunctata"
}
item {
name: "58523"
id: 391
display_name: "Papilio polyxenes"
}
item {
name: "58525"
id: 392
display_name: "Papilio troilus"
}
item {
name: "410783"
id: 393
display_name: "Hypoblemum albovittatum"
}
item {
name: "9376"
id: 394
display_name: "Carduelis cannabina"
}
item {
name: "58531"
id: 395
display_name: "Colias philodice"
}
item {
name: "50340"
id: 396
display_name: "Hylephila phyleus"
}
item {
name: "42149"
id: 397
display_name: "Hippopotamus amphibius"
}
item {
name: "50342"
id: 398
display_name: "Erythrodiplax umbrata"
}
item {
name: "12883"
id: 399
display_name: "Catharus minimus"
}
item {
name: "28557"
id: 400
display_name: "Storeria occipitomaculata"
}
item {
name: "199"
id: 401
display_name: "Amaurornis phoenicurus"
}
item {
name: "58541"
id: 402
display_name: "Satyrium liparops"
}
item {
name: "58543"
id: 403
display_name: "Callophrys augustinus"
}
item {
name: "42161"
id: 404
display_name: "Dama dama"
}
item {
name: "61508"
id: 405
display_name: "Ischnura elegans"
}
item {
name: "1204"
id: 406
display_name: "Pavo cristatus"
}
item {
name: "42166"
id: 407
display_name: "Axis axis"
}
item {
name: "146797"
id: 408
display_name: "Platynota idaeusalis"
}
item {
name: "58556"
id: 409
display_name: "Celastrina ladon"
}
item {
name: "367477"
id: 410
display_name: "Rallus crepitans"
}
item {
name: "58561"
id: 411
display_name: "Libytheana carinenta"
}
item {
name: "58563"
id: 412
display_name: "Speyeria aphrodite"
}
item {
name: "58564"
id: 413
display_name: "Boloria bellona"
}
item {
name: "413489"
id: 414
display_name: "Nestor meridionalis septentrionalis"
}
item {
name: "42184"
id: 415
display_name: "Capreolus capreolus"
}
item {
name: "9419"
id: 416
display_name: "Pipilo chlorurus"
}
item {
name: "9420"
id: 417
display_name: "Pipilo maculatus"
}
item {
name: "9424"
id: 418
display_name: "Pipilo erythrophthalmus"
}
item {
name: "99539"
id: 419
display_name: "Dorocordulia libera"
}
item {
name: "58580"
id: 420
display_name: "Polygonia progne"
}
item {
name: "58581"
id: 421
display_name: "Nymphalis vaualbum"
}
item {
name: "42199"
id: 422
display_name: "Rangifer tarandus"
}
item {
name: "58586"
id: 423
display_name: "Limenitis archippus"
}
item {
name: "58587"
id: 424
display_name: "Asterocampa clyton"
}
item {
name: "42206"
id: 425
display_name: "Cervus elaphus"
}
item {
name: "312543"
id: 426
display_name: "Anartia jatrophae luteipicta"
}
item {
name: "204094"
id: 427
display_name: "Cairina moschata domestica"
}
item {
name: "4304"
id: 428
display_name: "Phalacrocorax varius"
}
item {
name: "42210"
id: 429
display_name: "Cervus nippon"
}
item {
name: "17638"
id: 430
display_name: "Picoides dorsalis"
}
item {
name: "132330"
id: 431
display_name: "Chlosyne janais"
}
item {
name: "58603"
id: 432
display_name: "Megisto cymela"
}
item {
name: "42220"
id: 433
display_name: "Odocoileus hemionus"
}
item {
name: "17645"
id: 434
display_name: "Picoides nuttallii"
}
item {
name: "58606"
id: 435
display_name: "Cercyonis pegala"
}
item {
name: "42223"
id: 436
display_name: "Odocoileus virginianus"
}
item {
name: "58609"
id: 437
display_name: "Lepisosteus osseus"
}
item {
name: "17650"
id: 438
display_name: "Picoides scalaris"
}
item {
name: "132339"
id: 439
display_name: "Anthanassa texana"
}
item {
name: "58612"
id: 440
display_name: "Carassius auratus"
}
item {
name: "1406"
id: 441
display_name: "Callipepla gambelii"
}
item {
name: "9462"
id: 442
display_name: "Pyrrhula pyrrhula"
}
item {
name: "4308"
id: 443
display_name: "Phalacrocorax brasilianus"
}
item {
name: "17660"
id: 444
display_name: "Picoides pubescens"
}
item {
name: "1280"
id: 445
display_name: "Colinus virginianus"
}
item {
name: "129920"
id: 446
display_name: "Calliostoma ligatum"
}
item {
name: "58627"
id: 447
display_name: "Perca flavescens"
}
item {
name: "148742"
id: 448
display_name: "Hamadryas februa"
}
item {
name: "39809"
id: 449
display_name: "Terrapene ornata ornata"
}
item {
name: "115979"
id: 450
display_name: "Plestiodon skiltonianus skiltonianus"
}
item {
name: "9484"
id: 451
display_name: "Sporophila torqueola"
}
item {
name: "17678"
id: 452
display_name: "Picoides villosus"
}
item {
name: "3862"
id: 453
display_name: "Calidris pusilla"
}
item {
name: "70421"
id: 454
display_name: "Acris blanchardi"
}
item {
name: "124183"
id: 455
display_name: "Phlogophora periculosa"
}
item {
name: "124184"
id: 456
display_name: "Plodia interpunctella"
}
item {
name: "99609"
id: 457
display_name: "Dromogomphus spinosus"
}
item {
name: "99610"
id: 458
display_name: "Dromogomphus spoliatus"
}
item {
name: "17694"
id: 459
display_name: "Picoides arcticus"
}
item {
name: "113521"
id: 460
display_name: "Sympetrum pallipes"
}
item {
name: "320801"
id: 461
display_name: "Aspidoscelis tesselata"
}
item {
name: "7047"
id: 462
display_name: "Aythya marila"
}
item {
name: "4317"
id: 463
display_name: "Phaethon aethereus"
}
item {
name: "81606"
id: 464
display_name: "Littorina littorea"
}
item {
name: "99891"
id: 465
display_name: "Enallagma aspersum"
}
item {
name: "9528"
id: 466
display_name: "Sturnella magna"
}
item {
name: "99641"
id: 467
display_name: "Dythemis fugax"
}
item {
name: "99644"
id: 468
display_name: "Dythemis nigrescens"
}
item {
name: "39818"
id: 469
display_name: "Terrapene carolina triunguis"
}
item {
name: "99647"
id: 470
display_name: "Dythemis velox"
}
item {
name: "148800"
id: 471
display_name: "Chioides albofasciatus"
}
item {
name: "19339"
id: 472
display_name: "Melopsittacus undulatus"
}
item {
name: "47509"
id: 473
display_name: "Diaulula sandiegensis"
}
item {
name: "148810"
id: 474
display_name: "Anaea aidea"
}
item {
name: "123070"
id: 475
display_name: "Capra hircus"
}
item {
name: "7054"
id: 476
display_name: "Aythya affinis"
}
item {
name: "99897"
id: 477
display_name: "Enallagma civile"
}
item {
name: "42328"
id: 478
display_name: "Kobus ellipsiprymnus"
}
item {
name: "48328"
id: 479
display_name: "Aurelia aurita"
}
item {
name: "132445"
id: 480
display_name: "Conchylodes ovulalis"
}
item {
name: "215271"
id: 481
display_name: "Bleptina caradrinalis"
}
item {
name: "83297"
id: 482
display_name: "Scarus rubroviolaceus"
}
item {
name: "42347"
id: 483
display_name: "Rupicapra rupicapra"
}
item {
name: "7058"
id: 484
display_name: "Aythya novaeseelandiae"
}
item {
name: "52457"
id: 485
display_name: "Chaetodon auriga"
}
item {
name: "1392"
id: 486
display_name: "Cyrtonyx montezumae"
}
item {
name: "4328"
id: 487
display_name: "Pelecanus occidentalis"
}
item {
name: "7647"
id: 488
display_name: "Cinclus cinclus"
}
item {
name: "148856"
id: 489
display_name: "Anteos clorinde"
}
item {
name: "7060"
id: 490
display_name: "Chen rossii"
}
item {
name: "58750"
id: 491
display_name: "Nomophila nearctica"
}
item {
name: "1409"
id: 492
display_name: "Callipepla californica"
}
item {
name: "9602"
id: 493
display_name: "Quiscalus quiscula"
}
item {
name: "296326"
id: 494
display_name: "Oncopeltus sexmaculatus"
}
item {
name: "9607"
id: 495
display_name: "Quiscalus mexicanus"
}
item {
name: "319724"
id: 496
display_name: "Euphoria kernii"
}
item {
name: "1419"
id: 497
display_name: "Callipepla squamata"
}
item {
name: "148883"
id: 498
display_name: "Eantis tamenund"
}
item {
name: "42391"
id: 499
display_name: "Ovis canadensis"
}
item {
name: "107937"
id: 500
display_name: "Orthemis discolor"
}
item {
name: "42405"
id: 501
display_name: "Syncerus caffer"
}
item {
name: "42408"
id: 502
display_name: "Bison bison"
}
item {
name: "116137"
id: 503
display_name: "Sceloporus cowlesi"
}
item {
name: "326296"
id: 504
display_name: "Bufo bufo"
}
item {
name: "148907"
id: 505
display_name: "Cydia latiferreana"
}
item {
name: "42414"
id: 506
display_name: "Oreamnos americanus"
}
item {
name: "116143"
id: 507
display_name: "Sceloporus tristichus"
}
item {
name: "99912"
id: 508
display_name: "Enallagma geminatum"
}
item {
name: "226889"
id: 509
display_name: "Pangrapta decoralis"
}
item {
name: "42429"
id: 510
display_name: "Antilocapra americana"
}
item {
name: "17855"
id: 511
display_name: "Dryocopus pileatus"
}
item {
name: "107974"
id: 512
display_name: "Orthetrum sabina"
}
item {
name: "56225"
id: 513
display_name: "Polygonia c-album"
}
item {
name: "67016"
id: 514
display_name: "Rana draytonii"
}
item {
name: "132553"
id: 515
display_name: "Strymon istapa"
}
item {
name: "73155"
id: 516
display_name: "Passerina caerulea"
}
item {
name: "26074"
id: 517
display_name: "Crocodylus moreletii"
}
item {
name: "171903"
id: 518
display_name: "Oligyra orbiculata"
}
item {
name: "26085"
id: 519
display_name: "Crocodylus acutus"
}
item {
name: "143613"
id: 520
display_name: "Homophoberia apicosa"
}
item {
name: "5715"
id: 521
display_name: "Amazilia beryllina"
}
item {
name: "9721"
id: 522
display_name: "Geothlypis trichas"
}
item {
name: "154446"
id: 523
display_name: "Lambdina fiscellaria"
}
item {
name: "236841"
id: 524
display_name: "Lichanura orcutti"
}
item {
name: "20737"
id: 525
display_name: "Trogon melanocephalus"
}
item {
name: "124431"
id: 526
display_name: "Cycloneda sanguinea"
}
item {
name: "124432"
id: 527
display_name: "Deroceras reticulatum"
}
item {
name: "39566"
id: 528
display_name: "Apalone ferox"
}
item {
name: "149017"
id: 529
display_name: "Chlorochlamys chloroleucaria"
}
item {
name: "15281"
id: 530
display_name: "Sylvia communis"
}
item {
name: "312873"
id: 531
display_name: "Anartia fatima fatima"
}
item {
name: "9771"
id: 532
display_name: "Pinicola enucleator"
}
item {
name: "39858"
id: 533
display_name: "Graptemys geographica"
}
item {
name: "26159"
id: 534
display_name: "Alligator mississippiensis"
}
item {
name: "304690"
id: 535
display_name: "Naupactus cervinus"
}
item {
name: "124467"
id: 536
display_name: "Pseudosphinx tetrio"
}
item {
name: "99892"
id: 537
display_name: "Enallagma basidens"
}
item {
name: "99895"
id: 538
display_name: "Enallagma carunculatum"
}
item {
name: "67129"
id: 539
display_name: "Rhinella marina"
}
item {
name: "83515"
id: 540
display_name: "Oxybelis aeneus"
}
item {
name: "81681"
id: 541
display_name: "Campaea perlata"
}
item {
name: "99901"
id: 542
display_name: "Enallagma cyathigerum"
}
item {
name: "99911"
id: 543
display_name: "Enallagma exsulans"
}
item {
name: "9800"
id: 544
display_name: "Coccothraustes vespertinus"
}
item {
name: "9801"
id: 545
display_name: "Coccothraustes coccothraustes"
}
item {
name: "154551"
id: 546
display_name: "Leptoglossus zonatus"
}
item {
name: "9807"
id: 547
display_name: "Vermivora chrysoptera"
}
item {
name: "61157"
id: 548
display_name: "Trichodes ornatus"
}
item {
name: "99924"
id: 549
display_name: "Enallagma signatum"
}
item {
name: "1626"
id: 550
display_name: "Opisthocomus hoazin"
}
item {
name: "132704"
id: 551
display_name: "Setophaga coronata coronata"
}
item {
name: "119056"
id: 552
display_name: "Centruroides vittatus"
}
item {
name: "50786"
id: 553
display_name: "Vanessa annabella"
}
item {
name: "60347"
id: 554
display_name: "Pituophis catenifer sayi"
}
item {
name: "9833"
id: 555
display_name: "Diglossa baritula"
}
item {
name: "132718"
id: 556
display_name: "Scathophaga stercoraria"
}
item {
name: "132719"
id: 557
display_name: "Calopteron reticulatum"
}
item {
name: "116340"
id: 558
display_name: "Dreissena polymorpha"
}
item {
name: "134078"
id: 559
display_name: "Scoliopteryx libatrix"
}
item {
name: "9850"
id: 560
display_name: "Saltator coerulescens"
}
item {
name: "117695"
id: 561
display_name: "Cucumaria miniata"
}
item {
name: "9854"
id: 562
display_name: "Saltator atriceps"
}
item {
name: "132736"
id: 563
display_name: "Urola nivalis"
}
item {
name: "34435"
id: 564
display_name: "Hemidactylus turcicus"
}
item {
name: "9864"
id: 565
display_name: "Sicalis flaveola"
}
item {
name: "7106"
id: 566
display_name: "Aix galericulata"
}
item {
name: "485010"
id: 567
display_name: "Chinavia hilaris"
}
item {
name: "132764"
id: 568
display_name: "Junco hyemalis hyemalis"
}
item {
name: "367558"
id: 569
display_name: "Eupsittula canicularis"
}
item {
name: "370351"
id: 570
display_name: "Microcarbo melanoleucos"
}
item {
name: "50867"
id: 571
display_name: "Argiope bruennichi"
}
item {
name: "67252"
id: 572
display_name: "Trachycephalus typhonius"
}
item {
name: "132789"
id: 573
display_name: "Clepsis peritana"
}
item {
name: "9915"
id: 574
display_name: "Piranga rubra"
}
item {
name: "50880"
id: 575
display_name: "Limenitis lorquini"
}
item {
name: "9921"
id: 576
display_name: "Piranga olivacea"
}
item {
name: "100034"
id: 577
display_name: "Epiaeschna heros"
}
item {
name: "9924"
id: 578
display_name: "Piranga flava"
}
item {
name: "42339"
id: 579
display_name: "Tragelaphus strepsiceros"
}
item {
name: "50892"
id: 580
display_name: "Euphydryas chalcedona"
}
item {
name: "130348"
id: 581
display_name: "Dione moneta"
}
item {
name: "394966"
id: 582
display_name: "Phaulacridium marginale"
}
item {
name: "9943"
id: 583
display_name: "Amphispiza bilineata"
}
item {
name: "4388"
id: 584
display_name: "Larus dominicanus"
}
item {
name: "1758"
id: 585
display_name: "Piaya cayana"
}
item {
name: "50913"
id: 586
display_name: "Hyalophora euryalus"
}
item {
name: "9958"
id: 587
display_name: "Aimophila ruficeps"
}
item {
name: "59115"
id: 588
display_name: "Gambusia affinis"
}
item {
name: "64346"
id: 589
display_name: "Natrix tessellata"
}
item {
name: "59119"
id: 590
display_name: "Pontia protodice"
}
item {
name: "18160"
id: 591
display_name: "Melanerpes lewis"
}
item {
name: "18161"
id: 592
display_name: "Melanerpes uropygialis"
}
item {
name: "50931"
id: 593
display_name: "Strymon melinus"
}
item {
name: "59124"
id: 594
display_name: "Anthocharis sara"
}
item {
name: "59127"
id: 595
display_name: "Lycaena helloides"
}
item {
name: "59128"
id: 596
display_name: "Atlides halesus"
}
item {
name: "67324"
id: 597
display_name: "Eurema daira"
}
item {
name: "9981"
id: 598
display_name: "Passerculus sandwichensis"
}
item {
name: "59134"
id: 599
display_name: "Satyrium sylvinus"
}
item {
name: "67327"
id: 600
display_name: "Schistocerca obscura"
}
item {
name: "67328"
id: 601
display_name: "Pholcus phalangioides"
}
item {
name: "59138"
id: 602
display_name: "Satyrium saepium"
}
item {
name: "132867"
id: 603
display_name: "Microtia elva"
}
item {
name: "18181"
id: 604
display_name: "Melanerpes pucherani"
}
item {
name: "7486"
id: 605
display_name: "Salpinctes obsoletus"
}
item {
name: "108303"
id: 606
display_name: "Paltothemis lineatipes"
}
item {
name: "59152"
id: 607
display_name: "Leptotes marina"
}
item {
name: "132881"
id: 608
display_name: "Catocala ultronia"
}
item {
name: "143662"
id: 609
display_name: "Orthosoma brunneum"
}
item {
name: "59164"
id: 610
display_name: "Plebejus icarioides"
}
item {
name: "18205"
id: 611
display_name: "Melanerpes carolinus"
}
item {
name: "18206"
id: 612
display_name: "Melanerpes chrysogenys"
}
item {
name: "83744"
id: 613
display_name: "Amblyomma americanum"
}
item {
name: "18209"
id: 614
display_name: "Melanerpes formicivorus"
}
item {
name: "116517"
id: 615
display_name: "Caiman crocodilus"
}
item {
name: "59176"
id: 616
display_name: "Phyciodes mylitta"
}
item {
name: "59182"
id: 617
display_name: "Euphydryas editha"
}
item {
name: "43997"
id: 618
display_name: "Myocastor coypus"
}
item {
name: "59185"
id: 619
display_name: "Coenonympha tullia"
}
item {
name: "59187"
id: 620
display_name: "Erynnis propertius"
}
item {
name: "59188"
id: 621
display_name: "Erynnis funeralis"
}
item {
name: "59189"
id: 622
display_name: "Erynnis tristis"
}
item {
name: "59190"
id: 623
display_name: "Heliopetes ericetorum"
}
item {
name: "34615"
id: 624
display_name: "Gekko gecko"
}
item {
name: "42808"
id: 625
display_name: "Trichosurus vulpecula"
}
item {
name: "59194"
id: 626
display_name: "Ochlodes sylvanoides"
}
item {
name: "59195"
id: 627
display_name: "Lerodea eufala"
}
item {
name: "18236"
id: 628
display_name: "Colaptes auratus"
}
item {
name: "10045"
id: 629
display_name: "Basileuterus rufifrons"
}
item {
name: "59202"
id: 630
display_name: "Larus michahellis"
}
item {
name: "10053"
id: 631
display_name: "Ramphocelus passerinii"
}
item {
name: "19975"
id: 632
display_name: "Athene cunicularia"
}
item {
name: "82231"
id: 633
display_name: "Periplaneta americana"
}
item {
name: "67409"
id: 634
display_name: "Gobiesox maeandricus"
}
item {
name: "83795"
id: 635
display_name: "Cipangopaludina chinensis"
}
item {
name: "59220"
id: 636
display_name: "Branta hutchinsii"
}
item {
name: "10069"
id: 637
display_name: "Fringilla montifringilla"
}
item {
name: "10070"
id: 638
display_name: "Fringilla coelebs"
}
item {
name: "83802"
id: 639
display_name: "Megacyllene robiniae"
}
item {
name: "83804"
id: 640
display_name: "Dynastes tityus"
}
item {
name: "51039"
id: 641
display_name: "Cepaea hortensis"
}
item {
name: "68062"
id: 642
display_name: "Menemerus bivittatus"
}
item {
name: "47527"
id: 643
display_name: "Ostracion meleagris"
}
item {
name: "67435"
id: 644
display_name: "Urbanus proteus"
}
item {
name: "10094"
id: 645
display_name: "Junco hyemalis"
}
item {
name: "67440"
id: 646
display_name: "Utetheisa ornatrix"
}
item {
name: "100210"
id: 647
display_name: "Epitheca canis"
}
item {
name: "1907"
id: 648
display_name: "Cuculus canorus"
}
item {
name: "100215"
id: 649
display_name: "Epitheca princeps"
}
item {
name: "27826"
id: 650
display_name: "Taricha granulosa"
}
item {
name: "129147"
id: 651
display_name: "Ammophila procera"
}
item {
name: "10111"
id: 652
display_name: "Junco phaeonotus"
}
item {
name: "83844"
id: 653
display_name: "Oxyopes salticus"
}
item {
name: "144107"
id: 654
display_name: "Tetracis crocallata"
}
item {
name: "51097"
id: 655
display_name: "Papilio zelicaon"
}
item {
name: "10138"
id: 656
display_name: "Ammodramus nelsoni"
}
item {
name: "10139"
id: 657
display_name: "Ammodramus savannarum"
}
item {
name: "10147"
id: 658
display_name: "Ammodramus maritimus"
}
item {
name: "59300"
id: 659
display_name: "Anagrapha falcifera"
}
item {
name: "51110"
id: 660
display_name: "Xylocopa virginica"
}
item {
name: "1960"
id: 661
display_name: "Coccyzus erythropthalmus"
}
item {
name: "42652"
id: 662
display_name: "Didelphis virginiana"
}
item {
name: "428606"
id: 663
display_name: "Heraclides rumiko"
}
item {
name: "127303"
id: 664
display_name: "Callophrys henrici"
}
item {
name: "1964"
id: 665
display_name: "Coccyzus minor"
}
item {
name: "1965"
id: 666
display_name: "Coccyzus americanus"
}
item {
name: "8520"
id: 667
display_name: "Nucifraga columbiana"
}
item {
name: "116658"
id: 668
display_name: "Siphanta acuta"
}
item {
name: "1972"
id: 669
display_name: "Crotophaga sulcirostris"
}
item {
name: "10168"
id: 670
display_name: "Pooecetes gramineus"
}
item {
name: "53893"
id: 671
display_name: "Chlosyne palla"
}
item {
name: "10173"
id: 672
display_name: "Arremonops rufivirgatus"
}
item {
name: "1986"
id: 673
display_name: "Geococcyx californianus"
}
item {
name: "1987"
id: 674
display_name: "Geococcyx velox"
}
item {
name: "116680"
id: 675
display_name: "Tabanus atratus"
}
item {
name: "116681"
id: 676
display_name: "Atteva aurea"
}
item {
name: "124875"
id: 677
display_name: "Spodoptera litura"
}
item {
name: "26575"
id: 678
display_name: "Diadophis punctatus"
}
item {
name: "10199"
id: 679
display_name: "Coereba flaveola"
}
item {
name: "26591"
id: 680
display_name: "Diadophis punctatus edwardsii"
}
item {
name: "59360"
id: 681
display_name: "Neverita duplicata"
}
item {
name: "68263"
id: 682
display_name: "Papilio multicaudata"
}
item {
name: "26598"
id: 683
display_name: "Diadophis punctatus amabilis"
}
item {
name: "42983"
id: 684
display_name: "Phascolarctos cinereus"
}
item {
name: "67560"
id: 685
display_name: "Adelpha californica"
}
item {
name: "10224"
id: 686
display_name: "Passerina ciris"
}
item {
name: "2038"
id: 687
display_name: "Alectura lathami"
}
item {
name: "10232"
id: 688
display_name: "Passerina leclancherii"
}
item {
name: "10234"
id: 689
display_name: "Passerina amoena"
}
item {
name: "10243"
id: 690
display_name: "Icteria virens"
}
item {
name: "2052"
id: 691
display_name: "Crax rubra"
}
item {
name: "94551"
id: 692
display_name: "Argia immunda"
}
item {
name: "2062"
id: 693
display_name: "Penelope purpurascens"
}
item {
name: "204490"
id: 694
display_name: "Copsychus malabaricus"
}
item {
name: "10257"
id: 695
display_name: "Paroaria capitata"
}
item {
name: "51221"
id: 696
display_name: "Procambarus clarkii"
}
item {
name: "10262"
id: 697
display_name: "Cyanerpes cyaneus"
}
item {
name: "508249"
id: 698
display_name: "Microcarbo melanoleucos brevirostris"
}
item {
name: "18460"
id: 699
display_name: "Sphyrapicus thyroideus"
}
item {
name: "10271"
id: 700
display_name: "Pheucticus ludovicianus"
}
item {
name: "18464"
id: 701
display_name: "Sphyrapicus ruber"
}
item {
name: "10274"
id: 702
display_name: "Pheucticus melanocephalus"
}
item {
name: "18467"
id: 703
display_name: "Sphyrapicus nuchalis"
}
item {
name: "100391"
id: 704
display_name: "Erythrodiplax berenice"
}
item {
name: "2089"
id: 705
display_name: "Ortalis poliocephala"
}
item {
name: "2090"
id: 706
display_name: "Ortalis vetula"
}
item {
name: "8038"
id: 707
display_name: "Corvus albus"
}
item {
name: "67629"
id: 708
display_name: "Oligocottus maculosus"
}
item {
name: "10286"
id: 709
display_name: "Mniotilta varia"
}
item {
name: "10288"
id: 710
display_name: "Volatinia jacarina"
}
item {
name: "100403"
id: 711
display_name: "Erythrodiplax minuscula"
}
item {
name: "84023"
id: 712
display_name: "Amorpha juglandis"
}
item {
name: "84024"
id: 713
display_name: "Galasa nigrinodis"
}
item {
name: "10297"
id: 714
display_name: "Thraupis palmarum"
}
item {
name: "67642"
id: 715
display_name: "Pantherophis spiloides"
}
item {
name: "67653"
id: 716
display_name: "Phoebis agarithe"
}
item {
name: "84038"
id: 717
display_name: "Haploa lecontei"
}
item {
name: "26695"
id: 718
display_name: "Scaphiopus holbrookii"
}
item {
name: "84040"
id: 719
display_name: "Chauliognathus marginatus"
}
item {
name: "51275"
id: 720
display_name: "Pentatoma rufipes"
}
item {
name: "2124"
id: 721
display_name: "Momotus mexicanus"
}
item {
name: "26702"
id: 722
display_name: "Spea hammondii"
}
item {
name: "10325"
id: 723
display_name: "Euphagus cyanocephalus"
}
item {
name: "43102"
id: 724
display_name: "Sylvilagus palustris"
}
item {
name: "49509"
id: 725
display_name: "Lutjanus griseus"
}
item {
name: "116834"
id: 726
display_name: "Cacatua galerita"
}
item {
name: "127188"
id: 727
display_name: "Junco hyemalis oreganus"
}
item {
name: "26725"
id: 728
display_name: "Ambystoma jeffersonianum"
}
item {
name: "43111"
id: 729
display_name: "Sylvilagus floridanus"
}
item {
name: "43112"
id: 730
display_name: "Sylvilagus bachmani"
}
item {
name: "67691"
id: 731
display_name: "Lophocampa maculata"
}
item {
name: "51311"
id: 732
display_name: "Urbanus dorantes"
}
item {
name: "67700"
id: 733
display_name: "Caracolus caracolla"
}
item {
name: "43128"
id: 734
display_name: "Lepus europaeus"
}
item {
name: "26745"
id: 735
display_name: "Ambystoma texanum"
}
item {
name: "67706"
id: 736
display_name: "Argiope argentata"
}
item {
name: "26747"
id: 737
display_name: "Ambystoma gracile"
}
item {
name: "67708"
id: 738
display_name: "Argiope trifasciata"
}
item {
name: "26749"
id: 739
display_name: "Ambystoma tigrinum"
}
item {
name: "4896"
id: 740
display_name: "Pluvialis fulva"
}
item {
name: "10369"
id: 741
display_name: "Molothrus aeneus"
}
item {
name: "26754"
id: 742
display_name: "Ambystoma macrodactylum"
}
item {
name: "10373"
id: 743
display_name: "Molothrus ater"
}
item {
name: "2185"
id: 744
display_name: "Merops pusillus"
}
item {
name: "84109"
id: 745
display_name: "Pisaurina mira"
}
item {
name: "67726"
id: 746
display_name: "Aeshna palmata"
}
item {
name: "2191"
id: 747
display_name: "Merops apiaster"
}
item {
name: "67731"
id: 748
display_name: "Anax junius"
}
item {
name: "198804"
id: 749
display_name: "Satyrium titus"
}
item {
name: "51349"
id: 750
display_name: "Pyrgus communis"
}
item {
name: "18584"
id: 751
display_name: "Pteroglossus torquatus"
}
item {
name: "67737"
id: 752
display_name: "Rhionaeschna multicolor"
}
item {
name: "198812"
id: 753
display_name: "Lethe anthedon"
}
item {
name: "321697"
id: 754
display_name: "Melanchroia chephise"
}
item {
name: "198821"
id: 755
display_name: "Pieris oleracea"
}
item {
name: "26790"
id: 756
display_name: "Ambystoma maculatum"
}
item {
name: "10411"
id: 757
display_name: "Loxia curvirostra"
}
item {
name: "133295"
id: 758
display_name: "Melitaea didyma"
}
item {
name: "67760"
id: 759
display_name: "Popillia japonica"
}
item {
name: "43188"
id: 760
display_name: "Ochotona princeps"
}
item {
name: "2229"
id: 761
display_name: "Merops orientalis"
}
item {
name: "10423"
id: 762
display_name: "Loxia leucoptera"
}
item {
name: "67771"
id: 763
display_name: "Leptoglossus occidentalis"
}
item {
name: "84162"
id: 764
display_name: "Chrysochus auratus"
}
item {
name: "26822"
id: 765
display_name: "Dicamptodon tenebrosus"
}
item {
name: "26823"
id: 766
display_name: "Dicamptodon ensatus"
}
item {
name: "51402"
id: 767
display_name: "Megalops atlanticus"
}
item {
name: "67725"
id: 768
display_name: "Aeshna interrupta"
}
item {
name: "411858"
id: 769
display_name: "Vanessa gonerilla gonerilla"
}
item {
name: "26835"
id: 770
display_name: "Drymobius margaritiferus"
}
item {
name: "84185"
id: 771
display_name: "Megalopyge opercularis"
}
item {
name: "2266"
id: 772
display_name: "Coracias garrulus"
}
item {
name: "141531"
id: 773
display_name: "Lethe eurydice"
}
item {
name: "2269"
id: 774
display_name: "Coracias caudatus"
}
item {
name: "133346"
id: 775
display_name: "Melittia cucurbitae"
}
item {
name: "2275"
id: 776
display_name: "Coracias benghalensis"
}
item {
name: "84196"
id: 777
display_name: "Pontania californica"
}
item {
name: "10470"
id: 778
display_name: "Xanthocephalus xanthocephalus"
}
item {
name: "10479"
id: 779
display_name: "Chondestes grammacus"
}
item {
name: "51440"
id: 780
display_name: "Pituophis catenifer catenifer"
}
item {
name: "54087"
id: 781
display_name: "Pieris napi"
}
item {
name: "59635"
id: 782
display_name: "Phragmatopoma californica"
}
item {
name: "10487"
id: 783
display_name: "Dolichonyx oryzivorus"
}
item {
name: "67835"
id: 784
display_name: "Danaus chrysippus"
}
item {
name: "59644"
id: 785
display_name: "Pantherophis alleghaniensis"
}
item {
name: "59646"
id: 786
display_name: "Pantherophis bairdi"
}
item {
name: "116999"
id: 787
display_name: "Pandion haliaetus"
}
item {
name: "117002"
id: 788
display_name: "Phainopepla nitens"
}
item {
name: "16770"
id: 789
display_name: "Tyrannus couchii"
}
item {
name: "84239"
id: 790
display_name: "Callophrys gryneus"
}
item {
name: "104553"
id: 791
display_name: "Leucorrhinia proxima"
}
item {
name: "117016"
id: 792
display_name: "Phylloscopus collybita"
}
item {
name: "49540"
id: 793
display_name: "Gasteracantha cancriformis"
}
item {
name: "59675"
id: 794
display_name: "Pyrrharctia isabella"
}
item {
name: "469277"
id: 795
display_name: "Neotibicen superbus"
}
item {
name: "236973"
id: 796
display_name: "Circus cyaneus hudsonius"
}
item {
name: "59683"
id: 797
display_name: "Porpita porpita"
}
item {
name: "26916"
id: 798
display_name: "Contia tenuis"
}
item {
name: "51493"
id: 799
display_name: "Trimerotropis pallidipennis"
}
item {
name: "51495"
id: 800
display_name: "Anthocharis cardamines"
}
item {
name: "133416"
id: 801
display_name: "Phoebis philea"
}
item {
name: "8583"
id: 802
display_name: "Grallina cyanoleuca"
}
item {
name: "395569"
id: 803
display_name: "Prionoplus reticularis"
}
item {
name: "59698"
id: 804
display_name: "Velella velella"
}
item {
name: "141626"
id: 805
display_name: "Lygaeus turcicus"
}
item {
name: "84286"
id: 806
display_name: "Diapheromera femorata"
}
item {
name: "117059"
id: 807
display_name: "Plectrophenax nivalis"
}
item {
name: "133447"
id: 808
display_name: "Crambus agitatellus"
}
item {
name: "133448"
id: 809
display_name: "Climaciella brunnea"
}
item {
name: "51534"
id: 810
display_name: "Leptotes cassius"
}
item {
name: "205197"
id: 811
display_name: "Eutrapela clemataria"
}
item {
name: "51536"
id: 812
display_name: "Ascia monuste"
}
item {
name: "10585"
id: 813
display_name: "Calamospiza melanocorys"
}
item {
name: "49552"
id: 814
display_name: "Scutigera coleoptrata"
}
item {
name: "51555"
id: 815
display_name: "Sympetrum illotum"
}
item {
name: "51557"
id: 816
display_name: "Bombylius major"
}
item {
name: "117095"
id: 817
display_name: "Regulus calendula"
}
item {
name: "117097"
id: 818
display_name: "Regulus ignicapilla"
}
item {
name: "117099"
id: 819
display_name: "Regulus regulus"
}
item {
name: "117100"
id: 820
display_name: "Regulus satrapa"
}
item {
name: "84333"
id: 821
display_name: "Eudryas grata"
}
item {
name: "215409"
id: 822
display_name: "Bradybaena similaris"
}
item {
name: "16787"
id: 823
display_name: "Tyrannus melancholicus"
}
item {
name: "46225"
id: 824
display_name: "Tamias dorsalis"
}
item {
name: "59774"
id: 825
display_name: "Pachydiplax longipennis"
}
item {
name: "59776"
id: 826
display_name: "Perithemis tenera"
}
item {
name: "119014"
id: 827
display_name: "Argia fumipennis violacea"
}
item {
name: "4326"
id: 828
display_name: "Pelecanus conspicillatus"
}
item {
name: "18833"
id: 829
display_name: "Aulacorhynchus prasinus"
}
item {
name: "43411"
id: 830
display_name: "Ateles geoffroyi"
}
item {
name: "141725"
id: 831
display_name: "Nezara viridula"
}
item {
name: "51614"
id: 832
display_name: "Eurema hecabe"
}
item {
name: "125343"
id: 833
display_name: "Crepidula fornicata"
}
item {
name: "2464"
id: 834
display_name: "Todiramphus sanctus"
}
item {
name: "43432"
id: 835
display_name: "Cebus capucinus"
}
item {
name: "43436"
id: 836
display_name: "Alouatta palliata"
}
item {
name: "43439"
id: 837
display_name: "Alouatta pigra"
}
item {
name: "9357"
id: 838
display_name: "Icterus bullockii"
}
item {
name: "84403"
id: 839
display_name: "Phyllopalpus pulchellus"
}
item {
name: "10676"
id: 840
display_name: "Spiza americana"
}
item {
name: "16798"
id: 841
display_name: "Tyrannus dominicensis"
}
item {
name: "141752"
id: 842
display_name: "Biblis hyperia"
}
item {
name: "4512"
id: 843
display_name: "Chlidonias niger"
}
item {
name: "43460"
id: 844
display_name: "Macaca mulatta"
}
item {
name: "51654"
id: 845
display_name: "Junonia almana"
}
item {
name: "51659"
id: 846
display_name: "Anthopleura xanthogrammica"
}
item {
name: "84428"
id: 847
display_name: "Drepana arcuata"
}
item {
name: "10702"
id: 848
display_name: "Oriturus superciliosus"
}
item {
name: "68047"
id: 849
display_name: "Psarocolius montezuma"
}
item {
name: "12707"
id: 850
display_name: "Turdus pilaris"
}
item {
name: "84437"
id: 851
display_name: "Nicrophorus orbicollis"
}
item {
name: "84438"
id: 852
display_name: "Platyprepia virginalis"
}
item {
name: "117209"
id: 853
display_name: "Notiomystis cincta"
}
item {
name: "343393"
id: 854
display_name: "Hypsopygia olinalis"
}
item {
name: "27101"
id: 855
display_name: "Eurycea longicauda"
}
item {
name: "117214"
id: 856
display_name: "Sagittarius serpentarius"
}
item {
name: "18911"
id: 857
display_name: "Psittacula krameri"
}
item {
name: "117218"
id: 858
display_name: "Verrucosa arenata"
}
item {
name: "117221"
id: 859
display_name: "Dasymutilla occidentalis"
}
item {
name: "35303"
id: 860
display_name: "Ctenosaura similis"
}
item {
name: "18920"
id: 861
display_name: "Platycercus eximius"
}
item {
name: "10729"
id: 862
display_name: "Protonotaria citrea"
}
item {
name: "35306"
id: 863
display_name: "Ctenosaura pectinata"
}
item {
name: "109650"
id: 864
display_name: "Platycnemis pennipes"
}
item {
name: "27120"
id: 865
display_name: "Eurycea bislineata"
}
item {
name: "27123"
id: 866
display_name: "Eurycea lucifuga"
}
item {
name: "51702"
id: 867
display_name: "Coccinella septempunctata"
}
item {
name: "2552"
id: 868
display_name: "Megaceryle torquata"
}
item {
name: "133625"
id: 869
display_name: "Zanclognatha jacchusalis"
}
item {
name: "18943"
id: 870
display_name: "Nestor meridionalis"
}
item {
name: "84481"
id: 871
display_name: "Calopteryx maculata"
}
item {
name: "35330"
id: 872
display_name: "Sauromalus ater"
}
item {
name: "27140"
id: 873
display_name: "Coluber constrictor priapus"
}
item {
name: "199179"
id: 874
display_name: "Polistes chinensis"
}
item {
name: "51724"
id: 875
display_name: "Mopalia lignosa"
}
item {
name: "27149"
id: 876
display_name: "Coluber constrictor constrictor"
}
item {
name: "35342"
id: 877
display_name: "Iguana iguana"
}
item {
name: "27153"
id: 878
display_name: "Coluber constrictor flaviventris"
}
item {
name: "35347"
id: 879
display_name: "Amblyrhynchus cristatus"
}
item {
name: "125461"
id: 880
display_name: "Ursus arctos horribilis"
}
item {
name: "84507"
id: 881
display_name: "Lygus lineolaris"
}
item {
name: "35356"
id: 882
display_name: "Dipsosaurus dorsalis"
}
item {
name: "51743"
id: 883
display_name: "Danaus gilippus"
}
item {
name: "18976"
id: 884
display_name: "Amazona viridigenalis"
}
item {
name: "125475"
id: 885
display_name: "Plusiodonta compressipalpis"
}
item {
name: "51748"
id: 886
display_name: "Danaus gilippus thersippus"
}
item {
name: "68137"
id: 887
display_name: "Chlorocebus pygerythrus"
}
item {
name: "133675"
id: 888
display_name: "Coenobita clypeatus"
}
item {
name: "215596"
id: 889
display_name: "Buprestis aurulenta"
}
item {
name: "117293"
id: 890
display_name: "Oecophylla smaragdina"
}
item {
name: "68142"
id: 891
display_name: "Prenolepis imparis"
}
item {
name: "27184"
id: 892
display_name: "Plethodon glutinosus"
}
item {
name: "27186"
id: 893
display_name: "Plethodon cinereus"
}
item {
name: "18995"
id: 894
display_name: "Amazona albifrons"
}
item {
name: "51765"
id: 895
display_name: "Poanes melane"
}
item {
name: "18998"
id: 896
display_name: "Amazona oratrix"
}
item {
name: "41396"
id: 897
display_name: "Rhynchonycteris naso"
}
item {
name: "27194"
id: 898
display_name: "Plethodon vehiculum"
}
item {
name: "51773"
id: 899
display_name: "Nathalis iole"
}
item {
name: "12908"
id: 900
display_name: "Saxicola rubetra"
}
item {
name: "68165"
id: 901
display_name: "Linepithema humile"
}
item {
name: "154721"
id: 902
display_name: "Brachygastra mellifica"
}
item {
name: "338504"
id: 903
display_name: "Xanthocnemis zealandica"
}
item {
name: "338505"
id: 904
display_name: "Melangyna novaezelandiae"
}
item {
name: "27093"
id: 905
display_name: "Eurycea cirrigera"
}
item {
name: "65975"
id: 906
display_name: "Lithobates berlandieri"
}
item {
name: "19020"
id: 907
display_name: "Ara militaris"
}
item {
name: "474210"
id: 908
display_name: "Spizelloides arborea"
}
item {
name: "205240"
id: 909
display_name: "Pantographa limata"
}
item {
name: "27226"
id: 910
display_name: "Plethodon albagula"
}
item {
name: "318545"
id: 911
display_name: "Coreus marginatus"
}
item {
name: "2662"
id: 912
display_name: "Ceryle rudis"
}
item {
name: "109161"
id: 913
display_name: "Perithemis intensa"
}
item {
name: "51824"
id: 914
display_name: "Calopteryx splendens"
}
item {
name: "27250"
id: 915
display_name: "Ensatina eschscholtzii"
}
item {
name: "2676"
id: 916
display_name: "Chloroceryle aenea"
}
item {
name: "2679"
id: 917
display_name: "Chloroceryle amazona"
}
item {
name: "84602"
id: 918
display_name: "Zale lunata"
}
item {
name: "133756"
id: 919
display_name: "Leptoglossus oppositus"
}
item {
name: "35453"
id: 920
display_name: "Zootoca vivipara"
}
item {
name: "84612"
id: 921
display_name: "Polyphylla decemlineata"
}
item {
name: "133765"
id: 922
display_name: "Eumenes fraternus"
}
item {
name: "68230"
id: 923
display_name: "Brachymesia gravida"
}
item {
name: "49601"
id: 924
display_name: "Mola mola"
}
item {
name: "68232"
id: 925
display_name: "Papilio palamedes"
}
item {
name: "68233"
id: 926
display_name: "Orthemis ferruginea"
}
item {
name: "68239"
id: 927
display_name: "Parnassius clodius"
}
item {
name: "68240"
id: 928
display_name: "Chlosyne lacinia"
}
item {
name: "68244"
id: 929
display_name: "Euptoieta claudia"
}
item {
name: "68249"
id: 930
display_name: "Dymasia dymas"
}
item {
name: "68251"
id: 931
display_name: "Limenitis weidemeyerii"
}
item {
name: "133790"
id: 932
display_name: "Chalybion californicum"
}
item {
name: "84644"
id: 933
display_name: "Phalangium opilio"
}
item {
name: "68262"
id: 934
display_name: "Polygonia faunus"
}
item {
name: "133799"
id: 935
display_name: "Xenox tigrinus"
}
item {
name: "68264"
id: 936
display_name: "Asterocampa celtis"
}
item {
name: "132892"
id: 937
display_name: "Anacridium aegyptium"
}
item {
name: "68268"
id: 938
display_name: "Euptoieta hegesia"
}
item {
name: "68269"
id: 939
display_name: "Aglais milberti"
}
item {
name: "43694"
id: 940
display_name: "Loxodonta africana"
}
item {
name: "59165"
id: 941
display_name: "Apodemia mormo"
}
item {
name: "68274"
id: 942
display_name: "Phyciodes phaon"
}
item {
name: "68275"
id: 943
display_name: "Battus polydamas"
}
item {
name: "84662"
id: 944
display_name: "Celastrina lucia"
}
item {
name: "16842"
id: 945
display_name: "Myiozetetes similis"
}
item {
name: "133826"
id: 946
display_name: "Zelus longipes"
}
item {
name: "14912"
id: 947
display_name: "Toxostoma curvirostre"
}
item {
name: "53708"
id: 948
display_name: "Pacifastacus leniusculus"
}
item {
name: "117452"
id: 949
display_name: "Sphinx kalmiae"
}
item {
name: "182997"
id: 950
display_name: "Megisto rubricata"
}
item {
name: "223965"
id: 951
display_name: "Lithacodia musta"
}
item {
name: "125663"
id: 952
display_name: "Kelletia kelletii"
}
item {
name: "125669"
id: 953
display_name: "Rumina decollata"
}
item {
name: "68328"
id: 954
display_name: "Oxythyrea funesta"
}
item {
name: "179324"
id: 955
display_name: "Dactylotum bicolor"
}
item {
name: "68330"
id: 956
display_name: "Arctia caja"
}
item {
name: "2548"
id: 957
display_name: "Megaceryle alcyon"
}
item {
name: "207600"
id: 958
display_name: "Thasus neocalifornicus"
}
item {
name: "207601"
id: 959
display_name: "Palpita quadristigmalis"
}
item {
name: "51954"
id: 960
display_name: "Sphecius speciosus"
}
item {
name: "207603"
id: 961
display_name: "Prolimacodes badia"
}
item {
name: "7294"
id: 962
display_name: "Eremophila alpestris"
}
item {
name: "19196"
id: 963
display_name: "Alisterus scapularis"
}
item {
name: "145194"
id: 964
display_name: "Cinnyris jugularis"
}
item {
name: "27390"
id: 965
display_name: "Desmognathus ochrophaeus"
}
item {
name: "207615"
id: 966
display_name: "Polistes apachus"
}
item {
name: "63275"
id: 967
display_name: "Tremex columba"
}
item {
name: "61910"
id: 968
display_name: "Orgyia antiqua"
}
item {
name: "199438"
id: 969
display_name: "Orgyia postica"
}
item {
name: "43794"
id: 970
display_name: "Castor canadensis"
}
item {
name: "84755"
id: 971
display_name: "Arion rufus"
}
item {
name: "51996"
id: 972
display_name: "Daphnis nerii"
}
item {
name: "194075"
id: 973
display_name: "Drymarchon melanurus erebennus"
}
item {
name: "133923"
id: 974
display_name: "Mermiria bivittata"
}
item {
name: "84778"
id: 975
display_name: "Leptinotarsa decemlineata"
}
item {
name: "11051"
id: 976
display_name: "Xiphorhynchus flavigaster"
}
item {
name: "121992"
id: 977
display_name: "Cervus elaphus roosevelti"
}
item {
name: "27459"
id: 978
display_name: "Batrachoseps attenuatus"
}
item {
name: "84806"
id: 979
display_name: "Acanalonia conica"
}
item {
name: "52043"
id: 980
display_name: "Spoladea recurvalis"
}
item {
name: "27468"
id: 981
display_name: "Batrachoseps major"
}
item {
name: "133966"
id: 982
display_name: "Lomographa vestaliata"
}
item {
name: "27474"
id: 983
display_name: "Batrachoseps nigriventris"
}
item {
name: "101204"
id: 984
display_name: "Gambusia holbrooki"
}
item {
name: "52055"
id: 985
display_name: "Crocothemis servilia"
}
item {
name: "4580"
id: 986
display_name: "Jacana jacana"
}
item {
name: "346970"
id: 987
display_name: "Callophrys dumetorum"
}
item {
name: "27486"
id: 988
display_name: "Pseudotriton ruber"
}
item {
name: "52075"
id: 989
display_name: "Atalopedes campestris"
}
item {
name: "27500"
id: 990
display_name: "Gyrinophilus porphyriticus"
}
item {
name: "73203"
id: 991
display_name: "Phalaropus fulicarius"
}
item {
name: "322417"
id: 992
display_name: "Limacus flavus"
}
item {
name: "40083"
id: 993
display_name: "Gopherus berlandieri"
}
item {
name: "68469"
id: 994
display_name: "Papilio demodocus"
}
item {
name: "2938"
id: 995
display_name: "Streptopelia turtur"
}
item {
name: "117633"
id: 996
display_name: "Mopalia muscosa"
}
item {
name: "117641"
id: 997
display_name: "Nucella lamellosa"
}
item {
name: "322443"
id: 998
display_name: "Thasus gigas"
}
item {
name: "68492"
id: 999
display_name: "Hemidactylus mabouia"
}
item {
name: "143853"
id: 1000
display_name: "Pica hudsonia"
}
item {
name: "144757"
id: 1001
display_name: "Corvus cornix"
}
item {
name: "117650"
id: 1002
display_name: "Mytilus edulis"
}
item {
name: "19349"
id: 1003
display_name: "Myiopsitta monachus"
}
item {
name: "2969"
id: 1004
display_name: "Streptopelia decaocto"
}
item {
name: "9919"
id: 1005
display_name: "Piranga ludoviciana"
}
item {
name: "5009"
id: 1006
display_name: "Ixobrychus exilis"
}
item {
name: "117666"
id: 1007
display_name: "Pleuroncodes planipes"
}
item {
name: "7603"
id: 1008
display_name: "Auriparus flaviceps"
}
item {
name: "117674"
id: 1009
display_name: "Ligia occidentalis"
}
item {
name: "145223"
id: 1010
display_name: "Geothlypis tolmiei"
}
item {
name: "60341"
id: 1011
display_name: "Lithobates sphenocephalus"
}
item {
name: "60342"
id: 1012
display_name: "Thamnophis proximus"
}
item {
name: "52155"
id: 1013
display_name: "Dermacentor variabilis"
}
item {
name: "60349"
id: 1014
display_name: "Scincella lateralis"
}
item {
name: "52158"
id: 1015
display_name: "Schistocerca nitens"
}
item {
name: "117696"
id: 1016
display_name: "Dendraster excentricus"
}
item {
name: "232391"
id: 1017
display_name: "Tetracha carolina"
}
item {
name: "3017"
id: 1018
display_name: "Columba livia"
}
item {
name: "145229"
id: 1019
display_name: "Setophaga citrina"
}
item {
name: "84950"
id: 1020
display_name: "Alypia octomaculata"
}
item {
name: "52188"
id: 1021
display_name: "Rhincodon typus"
}
item {
name: "494559"
id: 1022
display_name: "Polydrusus formosus"
}
item {
name: "145232"
id: 1023
display_name: "Setophaga cerulea"
}
item {
name: "3048"
id: 1024
display_name: "Columba palumbus"
}
item {
name: "9922"
id: 1025
display_name: "Piranga bidentata"
}
item {
name: "44026"
id: 1026
display_name: "Erethizon dorsatum"
}
item {
name: "61505"
id: 1027
display_name: "Manduca sexta"
}
item {
name: "84994"
id: 1028
display_name: "Acanthocephala declivis"
}
item {
name: "27652"
id: 1029
display_name: "Hemidactylium scutatum"
}
item {
name: "117767"
id: 1030
display_name: "Cervus elaphus nannodes"
}
item {
name: "494603"
id: 1031
display_name: "Hermissenda opalescens"
}
item {
name: "39819"
id: 1032
display_name: "Terrapene carolina bauri"
}
item {
name: "3093"
id: 1033
display_name: "Patagioenas leucocephala"
}
item {
name: "205316"
id: 1034
display_name: "Aidemona azteca"
}
item {
name: "216093"
id: 1035
display_name: "Caracolus marginella"
}
item {
name: "44062"
id: 1036
display_name: "Thomomys bottae"
}
item {
name: "85024"
id: 1037
display_name: "Heraclides cresphontes"
}
item {
name: "3108"
id: 1038
display_name: "Patagioenas fasciata"
}
item {
name: "213510"
id: 1039
display_name: "Anageshna primordialis"
}
item {
name: "85030"
id: 1040
display_name: "Crocothemis erythraea"
}
item {
name: "85034"
id: 1041
display_name: "Neoscona crucifera"
}
item {
name: "3117"
id: 1042
display_name: "Patagioenas flavirostris"
}
item {
name: "207924"
id: 1043
display_name: "Synchlora frondaria"
}
item {
name: "35900"
id: 1044
display_name: "Lacerta bilineata"
}
item {
name: "24382"
id: 1045
display_name: "Osteopilus septentrionalis"
}
item {
name: "145249"
id: 1046
display_name: "Setophaga discolor"
}
item {
name: "52297"
id: 1047
display_name: "Triakis semifasciata"
}
item {
name: "27726"
id: 1048
display_name: "Salamandra salamandra"
}
item {
name: "27727"
id: 1049
display_name: "Bogertophis subocularis"
}
item {
name: "143043"
id: 1050
display_name: "Cycnia tenera"
}
item {
name: "52313"
id: 1051
display_name: "Diodon hystrix"
}
item {
name: "143316"
id: 1052
display_name: "Schinia florida"
}
item {
name: "61968"
id: 1053
display_name: "Graphosoma lineatum"
}
item {
name: "502885"
id: 1054
display_name: "Lissachatina fulica"
}
item {
name: "71029"
id: 1055
display_name: "Crotalus cerastes cerastes"
}
item {
name: "207977"
id: 1056
display_name: "Aglais io"
}
item {
name: "19577"
id: 1057
display_name: "Chordeiles minor"
}
item {
name: "93312"
id: 1058
display_name: "Acropora palmata"
}
item {
name: "52354"
id: 1059
display_name: "Ambystoma laterale"
}
item {
name: "19587"
id: 1060
display_name: "Chordeiles acutipennis"
}
item {
name: "58585"
id: 1061
display_name: "Limenitis arthemis astyanax"
}
item {
name: "134277"
id: 1062
display_name: "Gastrophryne olivacea"
}
item {
name: "60551"
id: 1063
display_name: "Papilio glaucus"
}
item {
name: "3731"
id: 1064
display_name: "Platalea leucorodia"
}
item {
name: "232593"
id: 1065
display_name: "Thyris sepulchralis"
}
item {
name: "19609"
id: 1066
display_name: "Phalaenoptilus nuttallii"
}
item {
name: "126106"
id: 1067
display_name: "Haploa clymene"
}
item {
name: "27805"
id: 1068
display_name: "Notophthalmus viridescens"
}
item {
name: "199840"
id: 1069
display_name: "Haemorhous mexicanus"
}
item {
name: "199841"
id: 1070
display_name: "Haemorhous purpureus"
}
item {
name: "219719"
id: 1071
display_name: "Eudryas unio"
}
item {
name: "27818"
id: 1072
display_name: "Taricha torosa"
}
item {
name: "19627"
id: 1073
display_name: "Nyctidromus albicollis"
}
item {
name: "28750"
id: 1074
display_name: "Salvadora grahamiae lineata"
}
item {
name: "27824"
id: 1075
display_name: "Taricha rivularis"
}
item {
name: "146632"
id: 1076
display_name: "Toxomerus politus"
}
item {
name: "52402"
id: 1077
display_name: "Cetonia aurata"
}
item {
name: "18291"
id: 1078
display_name: "Campephilus guatemalensis"
}
item {
name: "60598"
id: 1079
display_name: "Ixodes scapularis"
}
item {
name: "199870"
id: 1080
display_name: "Pyralis farinalis"
}
item {
name: "60607"
id: 1081
display_name: "Limenitis arthemis"
}
item {
name: "205241"
id: 1082
display_name: "Plagodis phlogosaria"
}
item {
name: "14898"
id: 1083
display_name: "Toxostoma rufum"
}
item {
name: "126153"
id: 1084
display_name: "Amphion floridensis"
}
item {
name: "126155"
id: 1085
display_name: "Vespula germanica"
}
item {
name: "51392"
id: 1086
display_name: "Morone saxatilis"
}
item {
name: "3280"
id: 1087
display_name: "Leptotila verreauxi"
}
item {
name: "19670"
id: 1088
display_name: "Nyctibius jamaicensis"
}
item {
name: "6929"
id: 1089
display_name: "Anas penelope"
}
item {
name: "97738"
id: 1090
display_name: "Chromagrion conditum"
}
item {
name: "52449"
id: 1091
display_name: "Rhinecanthus rectangulus"
}
item {
name: "52451"
id: 1092
display_name: "Naso lituratus"
}
item {
name: "56529"
id: 1093
display_name: "Papilio machaon"
}
item {
name: "199913"
id: 1094
display_name: "Buteo plagiatus"
}
item {
name: "199914"
id: 1095
display_name: "Selasphorus calliope"
}
item {
name: "85227"
id: 1096
display_name: "Hemideina crassidens"
}
item {
name: "36076"
id: 1097
display_name: "Cophosaurus texanus"
}
item {
name: "36077"
id: 1098
display_name: "Cophosaurus texanus texanus"
}
item {
name: "208112"
id: 1099
display_name: "Palpita magniferalis"
}
item {
name: "85235"
id: 1100
display_name: "Deinacrida rugosa"
}
item {
name: "93429"
id: 1101
display_name: "Aeshna constricta"
}
item {
name: "36086"
id: 1102
display_name: "Callisaurus draconoides rhodostictus"
}
item {
name: "126204"
id: 1103
display_name: "Synchlora aerata"
}
item {
name: "93437"
id: 1104
display_name: "Aeshna mixta"
}
item {
name: "126207"
id: 1105
display_name: "Schizura unicornis"
}
item {
name: "126209"
id: 1106
display_name: "Metcalfa pruinosa"
}
item {
name: "126211"
id: 1107
display_name: "Poecilocapsus lineatus"
}
item {
name: "36100"
id: 1108
display_name: "Uta stansburiana elegans"
}
item {
name: "48342"
id: 1109
display_name: "Hemigrapsus nudus"
}
item {
name: "199942"
id: 1110
display_name: "Strategus aloeus"
}
item {
name: "126215"
id: 1111
display_name: "Monobia quadridens"
}
item {
name: "101640"
id: 1112
display_name: "Gomphaeschna furcillata"
}
item {
name: "126217"
id: 1113
display_name: "Pyrausta orphisalis"
}
item {
name: "36107"
id: 1114
display_name: "Urosaurus ornatus"
}
item {
name: "51940"
id: 1115
display_name: "Hemidactylus frenatus"
}
item {
name: "36121"
id: 1116
display_name: "Urosaurus graciosus"
}
item {
name: "19743"
id: 1117
display_name: "Megascops kennicottii"
}
item {
name: "68901"
id: 1118
display_name: "Salticus scenicus"
}
item {
name: "44326"
id: 1119
display_name: "Microtus californicus"
}
item {
name: "82481"
id: 1120
display_name: "Pieris marginalis"
}
item {
name: "474332"
id: 1121
display_name: "Porphyrio poliocephalus"
}
item {
name: "81674"
id: 1122
display_name: "Rivula propinqualis"
}
item {
name: "126252"
id: 1123
display_name: "Mastigoproctus giganteus"
}
item {
name: "36142"
id: 1124
display_name: "Sceloporus undulatus"
}
item {
name: "68911"
id: 1125
display_name: "Libellula needhami"
}
item {
name: "68912"
id: 1126
display_name: "Dysdera crocata"
}
item {
name: "42888"
id: 1127
display_name: "Macropus giganteus"
}
item {
name: "19765"
id: 1128
display_name: "Megascops asio"
}
item {
name: "68918"
id: 1129
display_name: "Poecilanthrax lucifer"
}
item {
name: "333705"
id: 1130
display_name: "Pantherophis obsoletus lindheimeri"
}
item {
name: "126267"
id: 1131
display_name: "Coleomegilla maculata"
}
item {
name: "101693"
id: 1132
display_name: "Gomphus vastus"
}
item {
name: "85221"
id: 1133
display_name: "Hemideina thoracica"
}
item {
name: "126276"
id: 1134
display_name: "Agrotis ipsilon"
}
item {
name: "85317"
id: 1135
display_name: "Eurosta solidaginis"
}
item {
name: "36169"
id: 1136
display_name: "Sceloporus spinosus"
}
item {
name: "60752"
id: 1137
display_name: "Hermeuptychia sosybius"
}
item {
name: "60754"
id: 1138
display_name: "Pyromorpha dimidiata"
}
item {
name: "126291"
id: 1139
display_name: "Prosapia bicincta"
}
item {
name: "52564"
id: 1140
display_name: "Anthopleura elegantissima"
}
item {
name: "126293"
id: 1141
display_name: "Prionoxystus robiniae"
}
item {
name: "120719"
id: 1142
display_name: "Pseudacris hypochondriaca"
}
item {
name: "36189"
id: 1143
display_name: "Sceloporus poinsettii"
}
item {
name: "52576"
id: 1144
display_name: "Uroctonus mordax"
}
item {
name: "36198"
id: 1145
display_name: "Sceloporus orcutti"
}
item {
name: "52584"
id: 1146
display_name: "Pantala hymenaea"
}
item {
name: "44395"
id: 1147
display_name: "Peromyscus leucopus"
}
item {
name: "36204"
id: 1148
display_name: "Sceloporus occidentalis"
}
item {
name: "52589"
id: 1149
display_name: "Coenonympha pamphilus"
}
item {
name: "3439"
id: 1150
display_name: "Zenaida auriculata"
}
item {
name: "36208"
id: 1151
display_name: "Sceloporus occidentalis bocourtii"
}
item {
name: "72936"
id: 1152
display_name: "Hymenolaimus malacorhynchos"
}
item {
name: "85362"
id: 1153
display_name: "Sphex ichneumoneus"
}
item {
name: "36217"
id: 1154
display_name: "Sceloporus merriami"
}
item {
name: "68993"
id: 1155
display_name: "Liometopum occidentale"
}
item {
name: "199916"
id: 1156
display_name: "Setophaga caerulescens"
}
item {
name: "52620"
id: 1157
display_name: "Cicindela oregona"
}
item {
name: "36243"
id: 1158
display_name: "Sceloporus jarrovii"
}
item {
name: "52628"
id: 1159
display_name: "Araneus diadematus"
}
item {
name: "180007"
id: 1160
display_name: "Otospermophilus beecheyi"
}
item {
name: "85408"
id: 1161
display_name: "Erythemis collocata"
}
item {
name: "36262"
id: 1162
display_name: "Sceloporus grammicus"
}
item {
name: "60839"
id: 1163
display_name: "Spilosoma virginica"
}
item {
name: "16968"
id: 1164
display_name: "Camptostoma imberbe"
}
item {
name: "4715"
id: 1165
display_name: "Caracara plancus"
}
item {
name: "313246"
id: 1166
display_name: "Olla v-nigrum"
}
item {
name: "126393"
id: 1167
display_name: "Stomolophus meleagris"
}
item {
name: "126397"
id: 1168
display_name: "Halysidota harrisii"
}
item {
name: "64221"
id: 1169
display_name: "Bipalium kewense"
}
item {
name: "28102"
id: 1170
display_name: "Virginia striatula"
}
item {
name: "150985"
id: 1171
display_name: "Planorbella trivolvis"
}
item {
name: "36306"
id: 1172
display_name: "Phrynosoma modestum"
}
item {
name: "36307"
id: 1173
display_name: "Phrynosoma orbiculare"
}
item {
name: "199929"
id: 1174
display_name: "Plagiometriona clavata"
}
item {
name: "3545"
id: 1175
display_name: "Columbina passerina"
}
item {
name: "36315"
id: 1176
display_name: "Phrynosoma hernandesi"
}
item {
name: "367556"
id: 1177
display_name: "Eupsittula nana"
}
item {
name: "371963"
id: 1178
display_name: "Lampropeltis multifasciata"
}
item {
name: "36339"
id: 1179
display_name: "Holbrookia propinqua"
}
item {
name: "36094"
id: 1180
display_name: "Uta stansburiana"
}
item {
name: "36343"
id: 1181
display_name: "Holbrookia maculata"
}
item {
name: "52766"
id: 1182
display_name: "Megaphasma denticrus"
}
item {
name: "18941"
id: 1183
display_name: "Nestor notabilis"
}
item {
name: "3580"
id: 1184
display_name: "Columbina talpacoti"
}
item {
name: "123690"
id: 1185
display_name: "Caranx melampygus"
}
item {
name: "52482"
id: 1186
display_name: "Episyrphus balteatus"
}
item {
name: "28762"
id: 1187
display_name: "Rhinocheilus lecontei"
}
item {
name: "3607"
id: 1188
display_name: "Geopelia striata"
}
item {
name: "52484"
id: 1189
display_name: "Celastrina echo"
}
item {
name: "61293"
id: 1190
display_name: "Thaumetopoea pityocampa"
}
item {
name: "19998"
id: 1191
display_name: "Athene noctua"
}
item {
name: "44575"
id: 1192
display_name: "Rattus rattus"
}
item {
name: "44576"
id: 1193
display_name: "Rattus norvegicus"
}
item {
name: "133250"
id: 1194
display_name: "Tettigonia viridissima"
}
item {
name: "52774"
id: 1195
display_name: "Bombus fervidus"
}
item {
name: "49756"
id: 1196
display_name: "Nephila clavipes"
}
item {
name: "52779"
id: 1197
display_name: "Bombus bimaculatus"
}
item {
name: "52782"
id: 1198
display_name: "Melissodes bimaculata"
}
item {
name: "126513"
id: 1199
display_name: "Larinioides cornutus"
}
item {
name: "69170"
id: 1200
display_name: "Hemigrapsus oregonensis"
}
item {
name: "1971"
id: 1201
display_name: "Crotophaga ani"
}
item {
name: "12942"
id: 1202
display_name: "Sialia sialis"
}
item {
name: "126532"
id: 1203
display_name: "Toxomerus geminatus"
}
item {
name: "216649"
id: 1204
display_name: "Chauliognathus pensylvanicus"
}
item {
name: "3734"
id: 1205
display_name: "Platalea alba"
}
item {
name: "216651"
id: 1206
display_name: "Chelinidea vittiger"
}
item {
name: "20044"
id: 1207
display_name: "Bubo virginianus"
}
item {
name: "11855"
id: 1208
display_name: "Petrochelidon fulva"
}
item {
name: "28246"
id: 1209
display_name: "Arizona elegans"
}
item {
name: "224855"
id: 1210
display_name: "Melipotis indomita"
}
item {
name: "11867"
id: 1211
display_name: "Progne subis"
}
item {
name: "126562"
id: 1212
display_name: "Setophaga coronata auduboni"
}
item {
name: "126568"
id: 1213
display_name: "Manduca rustica"
}
item {
name: "11882"
id: 1214
display_name: "Hirundo neoxena"
}
item {
name: "11901"
id: 1215
display_name: "Hirundo rustica"
}
item {
name: "52865"
id: 1216
display_name: "Tramea lacerata"
}
item {
name: "142978"
id: 1217
display_name: "Simyra insularis"
}
item {
name: "123499"
id: 1218
display_name: "Notophthalmus viridescens viridescens"
}
item {
name: "339592"
id: 1219
display_name: "Calidris virgata"
}
item {
name: "339593"
id: 1220
display_name: "Calidris pugnax"
}
item {
name: "44311"
id: 1221
display_name: "Microtus pennsylvanicus"
}
item {
name: "142988"
id: 1222
display_name: "Lerema accius"
}
item {
name: "142990"
id: 1223
display_name: "Autographa precationis"
}
item {
name: "142995"
id: 1224
display_name: "Hymenia perspectalis"
}
item {
name: "129423"
id: 1225
display_name: "Zelus luridus"
}
item {
name: "3733"
id: 1226
display_name: "Platalea regia"
}
item {
name: "470678"
id: 1227
display_name: "Cerithideopsis californica"
}
item {
name: "146713"
id: 1228
display_name: "Elaphria grata"
}
item {
name: "143002"
id: 1229
display_name: "Orthonama obstipata"
}
item {
name: "11931"
id: 1230
display_name: "Tachycineta thalassina"
}
item {
name: "143005"
id: 1231
display_name: "Costaconvexa centrostrigaria"
}
item {
name: "3743"
id: 1232
display_name: "Bostrychia hagedash"
}
item {
name: "143009"
id: 1233
display_name: "Ectropis crepuscularia"
}
item {
name: "36514"
id: 1234
display_name: "Anolis carolinensis"
}
item {
name: "143012"
id: 1235
display_name: "Zanclognatha pedipilalis"
}
item {
name: "11941"
id: 1236
display_name: "Riparia riparia"
}
item {
name: "52902"
id: 1237
display_name: "Palthis asopialis"
}
item {
name: "3751"
id: 1238
display_name: "Eudocimus albus"
}
item {
name: "52906"
id: 1239
display_name: "Chytonix palliatricula"
}
item {
name: "3756"
id: 1240
display_name: "Plegadis falcinellus"
}
item {
name: "3759"
id: 1241
display_name: "Plegadis chihi"
}
item {
name: "143024"
id: 1242
display_name: "Eusarca confusaria"
}
item {
name: "62067"
id: 1243
display_name: "Orthetrum cancellatum"
}
item {
name: "28340"
id: 1244
display_name: "Thamnophis sauritus"
}
item {
name: "28345"
id: 1245
display_name: "Thamnophis cyrtopsis"
}
item {
name: "143034"
id: 1246
display_name: "Hippodamia variegata"
}
item {
name: "28347"
id: 1247
display_name: "Thamnophis cyrtopsis ocellatus"
}
item {
name: "52925"
id: 1248
display_name: "Phyciodes tharos"
}
item {
name: "8010"
id: 1249
display_name: "Corvus corax"
}
item {
name: "11970"
id: 1250
display_name: "Stelgidopteryx serripennis"
}
item {
name: "28362"
id: 1251
display_name: "Thamnophis sirtalis"
}
item {
name: "3788"
id: 1252
display_name: "Sula dactylatra"
}
item {
name: "44749"
id: 1253
display_name: "Neotoma fuscipes"
}
item {
name: "52943"
id: 1254
display_name: "Trichodezia albovittata"
}
item {
name: "3793"
id: 1255
display_name: "Sula sula"
}
item {
name: "101667"
id: 1256
display_name: "Gomphus exilis"
}
item {
name: "3797"
id: 1257
display_name: "Sula leucogaster"
}
item {
name: "118486"
id: 1258
display_name: "Macaria aemulataria"
}
item {
name: "3801"
id: 1259
display_name: "Morus serrator"
}
item {
name: "28378"
id: 1260
display_name: "Thamnophis radix"
}
item {
name: "118492"
id: 1261
display_name: "Helicoverpa zea"
}
item {
name: "148793"
id: 1262
display_name: "Asterocampa leilia"
}
item {
name: "28384"
id: 1263
display_name: "Thamnophis proximus rubrilineatus"
}
item {
name: "257761"
id: 1264
display_name: "Phocides polybius"
}
item {
name: "28387"
id: 1265
display_name: "Thamnophis proximus orarius"
}
item {
name: "28390"
id: 1266
display_name: "Thamnophis marcianus"
}
item {
name: "118503"
id: 1267
display_name: "Darapsa myron"
}
item {
name: "3817"
id: 1268
display_name: "Eudyptula minor"
}
item {
name: "36135"
id: 1269
display_name: "Uma scoparia"
}
item {
name: "28396"
id: 1270
display_name: "Thamnophis hammondii"
}
item {
name: "28400"
id: 1271
display_name: "Thamnophis elegans elegans"
}
item {
name: "118513"
id: 1272
display_name: "Hypena scabra"
}
item {
name: "28403"
id: 1273
display_name: "Thamnophis elegans vagrans"
}
item {
name: "201342"
id: 1274
display_name: "Chalcoela iphitalis"
}
item {
name: "3831"
id: 1275
display_name: "Megadyptes antipodes"
}
item {
name: "126712"
id: 1276
display_name: "Corydalus cornutus"
}
item {
name: "30676"
id: 1277
display_name: "Agkistrodon piscivorus leucostoma"
}
item {
name: "3834"
id: 1278
display_name: "Scopus umbretta"
}
item {
name: "213631"
id: 1279
display_name: "Anicla infecta"
}
item {
name: "143105"
id: 1280
display_name: "Pleuroprucha insulsaria"
}
item {
name: "28418"
id: 1281
display_name: "Thamnophis atratus"
}
item {
name: "118531"
id: 1282
display_name: "Parallelia bistriaris"
}
item {
name: "145363"
id: 1283
display_name: "Troglodytes troglodytes"
}
item {
name: "3845"
id: 1284
display_name: "Calidris canutus"
}
item {
name: "12038"
id: 1285
display_name: "Lanius collurio"
}
item {
name: "143114"
id: 1286
display_name: "Phragmatobia fuliginosa"
}
item {
name: "3851"
id: 1287
display_name: "Calidris bairdii"
}
item {
name: "324226"
id: 1288
display_name: "Meleagris gallopavo intermedia"
}
item {
name: "143118"
id: 1289
display_name: "Pseudeustrotia carneola"
}
item {
name: "3855"
id: 1290
display_name: "Calidris mauri"
}
item {
name: "3856"
id: 1291
display_name: "Calidris maritima"
}
item {
name: "3857"
id: 1292
display_name: "Calidris alpina"
}
item {
name: "143124"
id: 1293
display_name: "Parapediasia teterrella"
}
item {
name: "143125"
id: 1294
display_name: "Hypena madefactalis"
}
item {
name: "3863"
id: 1295
display_name: "Calidris ferruginea"
}
item {
name: "118552"
id: 1296
display_name: "Felis catus"
}
item {
name: "3865"
id: 1297
display_name: "Calidris melanotos"
}
item {
name: "3869"
id: 1298
display_name: "Limnodromus griseus"
}
item {
name: "118558"
id: 1299
display_name: "Manduca quinquemaculata"
}
item {
name: "118559"
id: 1300
display_name: "Tetraopes tetrophthalmus"
}
item {
name: "12065"
id: 1301
display_name: "Malurus cyaneus"
}
item {
name: "3878"
id: 1302
display_name: "Tringa nebularia"
}
item {
name: "101681"
id: 1303
display_name: "Gomphus militaris"
}
item {
name: "413483"
id: 1304
display_name: "Todiramphus sanctus vagans"
}
item {
name: "3885"
id: 1305
display_name: "Tringa ochropus"
}
item {
name: "3888"
id: 1306
display_name: "Tringa glareola"
}
item {
name: "126770"
id: 1307
display_name: "Vulpes vulpes fulvus"
}
item {
name: "3892"
id: 1308
display_name: "Tringa melanoleuca"
}
item {
name: "3893"
id: 1309
display_name: "Tringa flavipes"
}
item {
name: "126775"
id: 1310
display_name: "Cervus elaphus nelsoni"
}
item {
name: "3896"
id: 1311
display_name: "Numenius arquata"
}
item {
name: "126777"
id: 1312
display_name: "Peucetia viridans"
}
item {
name: "3901"
id: 1313
display_name: "Numenius phaeopus"
}
item {
name: "32058"
id: 1314
display_name: "Elgaria multicarinata webbii"
}
item {
name: "413506"
id: 1315
display_name: "Phalacrocorax carbo novaehollandiae"
}
item {
name: "413508"
id: 1316
display_name: "Petroica macrocephala macrocephala"
}
item {
name: "413512"
id: 1317
display_name: "Petroica australis longipes"
}
item {
name: "61258"
id: 1318
display_name: "Junonia evarete"
}
item {
name: "28493"
id: 1319
display_name: "Tantilla nigriceps"
}
item {
name: "413522"
id: 1320
display_name: "Prosthemadera novaeseelandiae novaeseelandiae"
}
item {
name: "58506"
id: 1321
display_name: "Polites themistocles"
}
item {
name: "28505"
id: 1322
display_name: "Tantilla gracilis"
}
item {
name: "20315"
id: 1323
display_name: "Asio flammeus"
}
item {
name: "143196"
id: 1324
display_name: "Schinia arcigera"
}
item {
name: "413533"
id: 1325
display_name: "Rhipidura fuliginosa fuliginosa"
}
item {
name: "3936"
id: 1326
display_name: "Scolopax minor"
}
item {
name: "3938"
id: 1327
display_name: "Arenaria interpres"
}
item {
name: "3941"
id: 1328
display_name: "Arenaria melanocephala"
}
item {
name: "413543"
id: 1329
display_name: "Rhipidura fuliginosa placabilis"
}
item {
name: "3947"
id: 1330
display_name: "Limosa limosa"
}
item {
name: "3950"
id: 1331
display_name: "Limosa haemastica"
}
item {
name: "126269"
id: 1332
display_name: "Austrolestes colensonis"
}
item {
name: "3954"
id: 1333
display_name: "Limosa fedoa"
}
item {
name: "199998"
id: 1334
display_name: "Pedicia albivitta"
}
item {
name: "3959"
id: 1335
display_name: "Phalaropus lobatus"
}
item {
name: "3962"
id: 1336
display_name: "Bartramia longicauda"
}
item {
name: "199999"
id: 1337
display_name: "Callopistria mollissima"
}
item {
name: "104426"
id: 1338
display_name: "Lestes disjunctus"
}
item {
name: "126848"
id: 1339
display_name: "Delphinia picta"
}
item {
name: "3951"
id: 1340
display_name: "Limosa lapponica"
}
item {
name: "20356"
id: 1341
display_name: "Aegolius acadicus"
}
item {
name: "121792"
id: 1342
display_name: "Polistes carolina"
}
item {
name: "3978"
id: 1343
display_name: "Actitis hypoleucos"
}
item {
name: "53911"
id: 1344
display_name: "Cyprinus carpio"
}
item {
name: "135055"
id: 1345
display_name: "Bufotes balearicus"
}
item {
name: "19121"
id: 1346
display_name: "Trichoglossus haematodus"
}
item {
name: "28562"
id: 1347
display_name: "Storeria dekayi"
}
item {
name: "28563"
id: 1348
display_name: "Storeria dekayi texana"
}
item {
name: "20372"
id: 1349
display_name: "Surnia ulula"
}
item {
name: "135064"
id: 1350
display_name: "Bufotes viridis"
}
item {
name: "28570"
id: 1351
display_name: "Storeria dekayi dekayi"
}
item {
name: "61341"
id: 1352
display_name: "Narceus americanus"
}
item {
name: "7493"
id: 1353
display_name: "Polioptila caerulea"
}
item {
name: "29339"
id: 1354
display_name: "Natrix natrix"
}
item {
name: "9135"
id: 1355
display_name: "Spizella passerina"
}
item {
name: "126889"
id: 1356
display_name: "Toxomerus marginatus"
}
item {
name: "143274"
id: 1357
display_name: "Gluphisia septentrionis"
}
item {
name: "343021"
id: 1358
display_name: "Anguis fragilis"
}
item {
name: "14591"
id: 1359
display_name: "Pycnonotus jocosus"
}
item {
name: "10227"
id: 1360
display_name: "Passerina cyanea"
}
item {
name: "10228"
id: 1361
display_name: "Passerina versicolor"
}
item {
name: "61371"
id: 1362
display_name: "Panulirus interruptus"
}
item {
name: "143294"
id: 1363
display_name: "Colias croceus"
}
item {
name: "135104"
id: 1364
display_name: "Ichthyosaura alpestris"
}
item {
name: "83958"
id: 1365
display_name: "Phryganidia californica"
}
item {
name: "143302"
id: 1366
display_name: "Megapallifera mutabilis"
}
item {
name: "12231"
id: 1367
display_name: "Manorina melanocephala"
}
item {
name: "200661"
id: 1368
display_name: "Coluber constrictor mormon"
}
item {
name: "3681"
id: 1369
display_name: "Ocyphaps lophotes"
}
item {
name: "4773"
id: 1370
display_name: "Jabiru mycteria"
}
item {
name: "135140"
id: 1371
display_name: "Taricha sierrae"
}
item {
name: "28649"
id: 1372
display_name: "Sonora semiannulata"
}
item {
name: "53226"
id: 1373
display_name: "Boisea rubrolineata"
}
item {
name: "53227"
id: 1374
display_name: "Boisea trivittata"
}
item {
name: "14593"
id: 1375
display_name: "Pycnonotus cafer"
}
item {
name: "61428"
id: 1376
display_name: "Arion subfuscus"
}
item {
name: "333822"
id: 1377
display_name: "Anser cygnoides domesticus"
}
item {
name: "41641"
id: 1378
display_name: "Ursus arctos"
}
item {
name: "56602"
id: 1379
display_name: "Plebejus lupini"
}
item {
name: "55295"
id: 1380
display_name: "Grapsus grapsus"
}
item {
name: "36181"
id: 1381
display_name: "Sceloporus cyanogenys"
}
item {
name: "41708"
id: 1382
display_name: "Phoca vitulina"
}
item {
name: "118788"
id: 1383
display_name: "Desmia funeralis"
}
item {
name: "61445"
id: 1384
display_name: "Acanthocephala terminalis"
}
item {
name: "30721"
id: 1385
display_name: "Crotalus triseriatus"
}
item {
name: "180010"
id: 1386
display_name: "Callospermophilus lateralis"
}
item {
name: "53875"
id: 1387
display_name: "Ocypode quadrata"
}
item {
name: "18358"
id: 1388
display_name: "Picus viridis"
}
item {
name: "143390"
id: 1389
display_name: "Oxidus gracilis"
}
item {
name: "55785"
id: 1390
display_name: "Ochlodes agricola"
}
item {
name: "4141"
id: 1391
display_name: "Phoebastria nigripes"
}
item {
name: "20526"
id: 1392
display_name: "Struthio camelus"
}
item {
name: "32093"
id: 1393
display_name: "Boa constrictor"
}
item {
name: "4144"
id: 1394
display_name: "Phoebastria immutabilis"
}
item {
name: "74442"
id: 1395
display_name: "Hydrochoerus hydrochaeris"
}
item {
name: "61492"
id: 1396
display_name: "Chrysopilus thoracicus"
}
item {
name: "61495"
id: 1397
display_name: "Erythemis simplicicollis"
}
item {
name: "389177"
id: 1398
display_name: "Eriophora pustulosa"
}
item {
name: "61503"
id: 1399
display_name: "Ascalapha odorata"
}
item {
name: "118855"
id: 1400
display_name: "Calosoma scrutator"
}
item {
name: "61513"
id: 1401
display_name: "Adelges tsugae"
}
item {
name: "28749"
id: 1402
display_name: "Salvadora grahamiae"
}
item {
name: "143440"
id: 1403
display_name: "Ceratomia catalpae"
}
item {
name: "61523"
id: 1404
display_name: "Helix pomatia"
}
item {
name: "4180"
id: 1405
display_name: "Fulmarus glacialis"
}
item {
name: "143445"
id: 1406
display_name: "Pachysphinx modesta"
}
item {
name: "233560"
id: 1407
display_name: "Vespula squamosa"
}
item {
name: "126308"
id: 1408
display_name: "Marpesia chiron"
}
item {
name: "61536"
id: 1409
display_name: "Calopteryx virgo"
}
item {
name: "685"
id: 1410
display_name: "Francolinus pondicerianus"
}
item {
name: "60774"
id: 1411
display_name: "Psychomorpha epimenis"
}
item {
name: "135271"
id: 1412
display_name: "Amphibolips confluenta"
}
item {
name: "69736"
id: 1413
display_name: "Schistocerca americana"
}
item {
name: "69737"
id: 1414
display_name: "Xylophanes tersa"
}
item {
name: "6141"
id: 1415
display_name: "Cynanthus latirostris"
}
item {
name: "4205"
id: 1416
display_name: "Podiceps nigricollis"
}
item {
name: "69743"
id: 1417
display_name: "Wallengrenia otho"
}
item {
name: "4208"
id: 1418
display_name: "Podiceps cristatus"
}
item {
name: "4209"
id: 1419
display_name: "Podiceps auritus"
}
item {
name: "118901"
id: 1420
display_name: "Hyles gallii"
}
item {
name: "17871"
id: 1421
display_name: "Dendrocopos major"
}
item {
name: "143484"
id: 1422
display_name: "Blepharomastix ranalis"
}
item {
name: "4224"
id: 1423
display_name: "Podiceps grisegena"
}
item {
name: "200834"
id: 1424
display_name: "Sphenodon punctatus"
}
item {
name: "179995"
id: 1425
display_name: "Urocitellus beldingi"
}
item {
name: "322024"
id: 1426
display_name: "Apatura ilia"
}
item {
name: "44396"
id: 1427
display_name: "Peromyscus maniculatus"
}
item {
name: "4237"
id: 1428
display_name: "Tachybaptus ruficollis"
}
item {
name: "118930"
id: 1429
display_name: "Spodoptera ornithogalli"
}
item {
name: "118936"
id: 1430
display_name: "Euplagia quadripunctaria"
}
item {
name: "4804"
id: 1431
display_name: "Charadrius montanus"
}
item {
name: "127133"
id: 1432
display_name: "Hyphantria cunea"
}
item {
name: "143518"
id: 1433
display_name: "Prochoerodes lineola"
}
item {
name: "52592"
id: 1434
display_name: "Pararge aegeria"
}
item {
name: "36149"
id: 1435
display_name: "Sceloporus torquatus"
}
item {
name: "118951"
id: 1436
display_name: "Pterophylla camellifolia"
}
item {
name: "4265"
id: 1437
display_name: "Phalacrocorax auritus"
}
item {
name: "4270"
id: 1438
display_name: "Phalacrocorax carbo"
}
item {
name: "446640"
id: 1439
display_name: "Neomonachus schauinslandi"
}
item {
name: "118961"
id: 1440
display_name: "Conocephalus brevipennis"
}
item {
name: "28850"
id: 1441
display_name: "Regina septemvittata"
}
item {
name: "4277"
id: 1442
display_name: "Phalacrocorax penicillatus"
}
item {
name: "4234"
id: 1443
display_name: "Aechmophorus clarkii"
}
item {
name: "118967"
id: 1444
display_name: "Psyllobora vigintimaculata"
}
item {
name: "118968"
id: 1445
display_name: "Allograpta obliqua"
}
item {
name: "118970"
id: 1446
display_name: "Bombus impatiens"
}
item {
name: "123594"
id: 1447
display_name: "Anaxyrus americanus americanus"
}
item {
name: "69838"
id: 1448
display_name: "Cyanea capillata"
}
item {
name: "69844"
id: 1449
display_name: "Anthocharis midea"
}
item {
name: "48505"
id: 1450
display_name: "Junonia coenia"
}
item {
name: "151769"
id: 1451
display_name: "Diaphania hyalinata"
}
item {
name: "151770"
id: 1452
display_name: "Peridea angulosa"
}
item {
name: "53467"
id: 1453
display_name: "Leucauge venusta"
}
item {
name: "119013"
id: 1454
display_name: "Ctenucha virginica"
}
item {
name: "4327"
id: 1455
display_name: "Pelecanus onocrotalus"
}
item {
name: "143592"
id: 1456
display_name: "Spragueia leo"
}
item {
name: "200938"
id: 1457
display_name: "Diaethria anna"
}
item {
name: "4334"
id: 1458
display_name: "Pelecanus erythrorhynchos"
}
item {
name: "151794"
id: 1459
display_name: "Atta texana"
}
item {
name: "3454"
id: 1460
display_name: "Zenaida macroura"
}
item {
name: "4872"
id: 1461
display_name: "Vanellus miles"
}
item {
name: "4345"
id: 1462
display_name: "Larus occidentalis"
}
item {
name: "143610"
id: 1463
display_name: "Besma quercivoraria"
}
item {
name: "20733"
id: 1464
display_name: "Trogon massena"
}
item {
name: "143615"
id: 1465
display_name: "Udea rubigalis"
}
item {
name: "4352"
id: 1466
display_name: "Larus thayeri"
}
item {
name: "4353"
id: 1467
display_name: "Larus heermanni"
}
item {
name: "4354"
id: 1468
display_name: "Larus livens"
}
item {
name: "4356"
id: 1469
display_name: "Larus canus"
}
item {
name: "220826"
id: 1470
display_name: "Habrosyne scripta"
}
item {
name: "4361"
id: 1471
display_name: "Larus glaucoides"
}
item {
name: "4364"
id: 1472
display_name: "Larus delawarensis"
}
item {
name: "102672"
id: 1473
display_name: "Hetaerina titia"
}
item {
name: "20754"
id: 1474
display_name: "Trogon collaris"
}
item {
name: "479512"
id: 1475
display_name: "Acronicta fallax"
}
item {
name: "3460"
id: 1476
display_name: "Zenaida asiatica"
}
item {
name: "119066"
id: 1477
display_name: "Idia lubricalis"
}
item {
name: "119068"
id: 1478
display_name: "Apodemia virgulti"
}
item {
name: "4381"
id: 1479
display_name: "Larus fuscus"
}
item {
name: "4385"
id: 1480
display_name: "Larus californicus"
}
item {
name: "69922"
id: 1481
display_name: "Oncorhynchus nerka"
}
item {
name: "12580"
id: 1482
display_name: "Prosthemadera novaeseelandiae"
}
item {
name: "69925"
id: 1483
display_name: "Clinocardium nuttallii"
}
item {
name: "20781"
id: 1484
display_name: "Trogon elegans"
}
item {
name: "4399"
id: 1485
display_name: "Larus glaucescens"
}
item {
name: "94513"
id: 1486
display_name: "Archilestes grandis"
}
item {
name: "119090"
id: 1487
display_name: "Eremnophila aureonotata"
}
item {
name: "20787"
id: 1488
display_name: "Trogon citreolus"
}
item {
name: "69940"
id: 1489
display_name: "Hemiargus ceraunus"
}
item {
name: "61749"
id: 1490
display_name: "Lucanus cervus"
}
item {
name: "4415"
id: 1491
display_name: "Cepphus columba"
}
item {
name: "4832"
id: 1492
display_name: "Himantopus leucocephalus"
}
item {
name: "4418"
id: 1493
display_name: "Cepphus grylle"
}
item {
name: "12612"
id: 1494
display_name: "Anthornis melanura"
}
item {
name: "125627"
id: 1495
display_name: "Ellychnia corrusca"
}
item {
name: "201031"
id: 1496
display_name: "Leptoptilos crumenifer"
}
item {
name: "201032"
id: 1497
display_name: "Threskiornis moluccus"
}
item {
name: "60812"
id: 1498
display_name: "Lucanus capreolus"
}
item {
name: "10295"
id: 1499
display_name: "Thraupis episcopus"
}
item {
name: "209233"
id: 1500
display_name: "Equus caballus"
}
item {
name: "119122"
id: 1501
display_name: "Araneus trifolium"
}
item {
name: "201043"
id: 1502
display_name: "Geranoaetus albicaudatus"
}
item {
name: "61781"
id: 1503
display_name: "Ochlodes sylvanus"
}
item {
name: "49133"
id: 1504
display_name: "Vanessa atalanta"
}
item {
name: "94556"
id: 1505
display_name: "Argia lugens"
}
item {
name: "94557"
id: 1506
display_name: "Argia moesta"
}
item {
name: "61524"
id: 1507
display_name: "Forficula auricularia"
}
item {
name: "4449"
id: 1508
display_name: "Sterna paradisaea"
}
item {
name: "4450"
id: 1509
display_name: "Sterna hirundo"
}
item {
name: "348515"
id: 1510
display_name: "Nyctemera annulata"
}
item {
name: "110625"
id: 1511
display_name: "Progomphus obscurus"
}
item {
name: "94566"
id: 1512
display_name: "Argia plana"
}
item {
name: "4457"
id: 1513
display_name: "Sterna forsteri"
}
item {
name: "94571"
id: 1514
display_name: "Argia sedula"
}
item {
name: "61804"
id: 1515
display_name: "Olivella biplicata"
}
item {
name: "204532"
id: 1516
display_name: "Lanius excubitor"
}
item {
name: "29038"
id: 1517
display_name: "Pituophis deppei"
}
item {
name: "143728"
id: 1518
display_name: "Choristoneura rosaceana"
}
item {
name: "94577"
id: 1519
display_name: "Argia translata"
}
item {
name: "130451"
id: 1520
display_name: "Dione juno"
}
item {
name: "29044"
id: 1521
display_name: "Pituophis catenifer"
}
item {
name: "70005"
id: 1522
display_name: "Ilyanassa obsoleta"
}
item {
name: "143734"
id: 1523
display_name: "Eupithecia miserulata"
}
item {
name: "20856"
id: 1524
display_name: "Pharomachrus mocinno"
}
item {
name: "29049"
id: 1525
display_name: "Pituophis catenifer deserticola"
}
item {
name: "29052"
id: 1526
display_name: "Pituophis catenifer affinis"
}
item {
name: "29053"
id: 1527
display_name: "Pituophis catenifer annectens"
}
item {
name: "4478"
id: 1528
display_name: "Sterna striata"
}
item {
name: "407459"
id: 1529
display_name: "Dolomedes minor"
}
item {
name: "4489"
id: 1530
display_name: "Stercorarius parasiticus"
}
item {
name: "4491"
id: 1531
display_name: "Stercorarius pomarinus"
}
item {
name: "6969"
id: 1532
display_name: "Anas gracilis"
}
item {
name: "4494"
id: 1533
display_name: "Rissa tridactyla"
}
item {
name: "4496"
id: 1534
display_name: "Rynchops niger"
}
item {
name: "4501"
id: 1535
display_name: "Alca torda"
}
item {
name: "4504"
id: 1536
display_name: "Fratercula arctica"
}
item {
name: "4509"
id: 1537
display_name: "Fratercula cirrhata"
}
item {
name: "26693"
id: 1538
display_name: "Scaphiopus hurterii"
}
item {
name: "94624"
id: 1539
display_name: "Arigomphus submedianus"
}
item {
name: "94625"
id: 1540
display_name: "Arigomphus villosipes"
}
item {
name: "120720"
id: 1541
display_name: "Pseudacris sierra"
}
item {
name: "70057"
id: 1542
display_name: "Agrilus planipennis"
}
item {
name: "127402"
id: 1543
display_name: "Grammia virgo"
}
item {
name: "51271"
id: 1544
display_name: "Trachemys scripta elegans"
}
item {
name: "12716"
id: 1545
display_name: "Turdus merula"
}
item {
name: "12718"
id: 1546
display_name: "Turdus plumbeus"
}
item {
name: "12720"
id: 1547
display_name: "Turdus grayi"
}
item {
name: "63697"
id: 1548
display_name: "Metacarcinus magister"
}
item {
name: "12727"
id: 1549
display_name: "Turdus migratorius"
}
item {
name: "26698"
id: 1550
display_name: "Spea multiplicata"
}
item {
name: "12735"
id: 1551
display_name: "Turdus viscivorus"
}
item {
name: "26699"
id: 1552
display_name: "Spea bombifrons"
}
item {
name: "127431"
id: 1553
display_name: "Emmelina monodactyla"
}
item {
name: "4553"
id: 1554
display_name: "Cerorhinca monocerata"
}
item {
name: "12748"
id: 1555
display_name: "Turdus philomelos"
}
item {
name: "233933"
id: 1556
display_name: "Zale horrida"
}
item {
name: "1468"
id: 1557
display_name: "Galbula ruficauda"
}
item {
name: "111055"
id: 1558
display_name: "Pseudoleon superbus"
}
item {
name: "61908"
id: 1559
display_name: "Orgyia vetusta"
}
item {
name: "43086"
id: 1560
display_name: "Procavia capensis"
}
item {
name: "143830"
id: 1561
display_name: "Eumorpha vitis"
}
item {
name: "67663"
id: 1562
display_name: "Leptysma marginicollis"
}
item {
name: "127457"
id: 1563
display_name: "Idia americalis"
}
item {
name: "4578"
id: 1564
display_name: "Jacana spinosa"
}
item {
name: "127460"
id: 1565
display_name: "Idia aemula"
}
item {
name: "201192"
id: 1566
display_name: "Saxicola rubicola"
}
item {
name: "20969"
id: 1567
display_name: "Upupa epops"
}
item {
name: "94699"
id: 1568
display_name: "Aspidoscelis marmorata"
}
item {
name: "10322"
id: 1569
display_name: "Euphagus carolinus"
}
item {
name: "53743"
id: 1570
display_name: "Uca pugilator"
}
item {
name: "61256"
id: 1571
display_name: "Leptoglossus phyllopus"
}
item {
name: "29438"
id: 1572
display_name: "Coluber flagellum piceus"
}
item {
name: "53750"
id: 1573
display_name: "Lottia gigantea"
}
item {
name: "143865"
id: 1574
display_name: "Odocoileus hemionus hemionus"
}
item {
name: "143867"
id: 1575
display_name: "Protoboarmia porcelaria"
}
item {
name: "209405"
id: 1576
display_name: "Cenopis reticulatana"
}
item {
name: "49920"
id: 1577
display_name: "Nymphalis californica"
}
item {
name: "53762"
id: 1578
display_name: "Scolopendra polymorpha"
}
item {
name: "127492"
id: 1579
display_name: "Megalographa biloba"
}
item {
name: "62470"
id: 1580
display_name: "Limax maximus"
}
item {
name: "4621"
id: 1581
display_name: "Gavia pacifica"
}
item {
name: "14884"
id: 1582
display_name: "Mimus gilvus"
}
item {
name: "29200"
id: 1583
display_name: "Opheodrys aestivus"
}
item {
name: "201233"
id: 1584
display_name: "Passer italiae"
}
item {
name: "4626"
id: 1585
display_name: "Gavia immer"
}
item {
name: "4627"
id: 1586
display_name: "Gavia stellata"
}
item {
name: "12822"
id: 1587
display_name: "Oenanthe oenanthe"
}
item {
name: "4631"
id: 1588
display_name: "Fregata magnificens"
}
item {
name: "4636"
id: 1589
display_name: "Fregata minor"
}
item {
name: "70174"
id: 1590
display_name: "Hypolimnas bolina"
}
item {
name: "4643"
id: 1591
display_name: "Falco subbuteo"
}
item {
name: "4644"
id: 1592
display_name: "Falco mexicanus"
}
item {
name: "4645"
id: 1593
display_name: "Falco femoralis"
}
item {
name: "4647"
id: 1594
display_name: "Falco peregrinus"
}
item {
name: "119340"
id: 1595
display_name: "Amphipyra pyramidoides"
}
item {
name: "61997"
id: 1596
display_name: "Steatoda grossa"
}
item {
name: "70191"
id: 1597
display_name: "Ischnura ramburii"
}
item {
name: "53809"
id: 1598
display_name: "Phidippus audax"
}
item {
name: "143213"
id: 1599
display_name: "Frontinella communis"
}
item {
name: "4664"
id: 1600
display_name: "Falco rufigularis"
}
item {
name: "4665"
id: 1601
display_name: "Falco sparverius"
}
item {
name: "19893"
id: 1602
display_name: "Strix varia"
}
item {
name: "4672"
id: 1603
display_name: "Falco columbarius"
}
item {
name: "201281"
id: 1604
display_name: "Phyllodesma americana"
}
item {
name: "201282"
id: 1605
display_name: "Gallinula chloropus"
}
item {
name: "152131"
id: 1606
display_name: "Bagrada hilaris"
}
item {
name: "145276"
id: 1607
display_name: "Cardellina pusilla"
}
item {
name: "12878"
id: 1608
display_name: "Catharus ustulatus"
}
item {
name: "4690"
id: 1609
display_name: "Falco novaeseelandiae"
}
item {
name: "53843"
id: 1610
display_name: "Brephidium exilis"
}
item {
name: "36281"
id: 1611
display_name: "Sceloporus clarkii"
}
item {
name: "12890"
id: 1612
display_name: "Catharus guttatus"
}
item {
name: "62045"
id: 1613
display_name: "Lygaeus kalmii"
}
item {
name: "47075"
id: 1614
display_name: "Dasypus novemcinctus"
}
item {
name: "12901"
id: 1615
display_name: "Catharus fuscescens"
}
item {
name: "4714"
id: 1616
display_name: "Caracara cheriway"
}
item {
name: "53867"
id: 1617
display_name: "Erythemis plebeja"
}
item {
name: "62060"
id: 1618
display_name: "Palomena prasina"
}
item {
name: "53869"
id: 1619
display_name: "Ocypus olens"
}
item {
name: "4719"
id: 1620
display_name: "Herpetotheres cachinnans"
}
item {
name: "116840"
id: 1621
display_name: "Calcarius lapponicus"
}
item {
name: "4726"
id: 1622
display_name: "Milvago chimachima"
}
item {
name: "29304"
id: 1623
display_name: "Nerodia taxispilota"
}
item {
name: "29305"
id: 1624
display_name: "Nerodia sipedon"
}
item {
name: "29306"
id: 1625
display_name: "Nerodia sipedon sipedon"
}
item {
name: "142783"
id: 1626
display_name: "Myodocha serripes"
}
item {
name: "4733"
id: 1627
display_name: "Ciconia ciconia"
}
item {
name: "29310"
id: 1628
display_name: "Nerodia rhombifer"
}
item {
name: "201343"
id: 1629
display_name: "Lithacodes fasciola"
}
item {
name: "21121"
id: 1630
display_name: "Dendrobates auratus"
}
item {
name: "127618"
id: 1631
display_name: "Epirrhoe alternata"
}
item {
name: "43115"
id: 1632
display_name: "Sylvilagus audubonii"
}
item {
name: "29317"
id: 1633
display_name: "Nerodia fasciata"
}
item {
name: "4742"
id: 1634
display_name: "Mycteria americana"
}
item {
name: "53895"
id: 1635
display_name: "Stenopelmatus fuscus"
}
item {
name: "4744"
id: 1636
display_name: "Mycteria ibis"
}
item {
name: "12937"
id: 1637
display_name: "Sialia mexicana"
}
item {
name: "29322"
id: 1638
display_name: "Nerodia fasciata confluens"
}
item {
name: "29324"
id: 1639
display_name: "Nerodia clarkii clarkii"
}
item {
name: "29327"
id: 1640
display_name: "Nerodia cyclopion"
}
item {
name: "29328"
id: 1641
display_name: "Nerodia erythrogaster"
}
item {
name: "53905"
id: 1642
display_name: "Mantis religiosa"
}
item {
name: "4754"
id: 1643
display_name: "Ephippiorhynchus senegalensis"
}
item {
name: "127635"
id: 1644
display_name: "Plecia nearctica"
}
item {
name: "4756"
id: 1645
display_name: "Cathartes aura"
}
item {
name: "29334"
id: 1646
display_name: "Nerodia erythrogaster flavigaster"
}
item {
name: "12951"
id: 1647
display_name: "Myadestes townsendi"
}
item {
name: "4761"
id: 1648
display_name: "Cathartes burrovianus"
}
item {
name: "4763"
id: 1649
display_name: "Sarcoramphus papa"
}
item {
name: "4765"
id: 1650
display_name: "Coragyps atratus"
}
item {
name: "19890"
id: 1651
display_name: "Strix nebulosa"
}
item {
name: "26736"
id: 1652
display_name: "Ambystoma opacum"
}
item {
name: "66331"
id: 1653
display_name: "Pelophylax perezi"
}
item {
name: "4776"
id: 1654
display_name: "Anastomus lamelligerus"
}
item {
name: "4892"
id: 1655
display_name: "Pluvialis squatarola"
}
item {
name: "4778"
id: 1656
display_name: "Gymnogyps californianus"
}
item {
name: "12971"
id: 1657
display_name: "Muscicapa striata"
}
item {
name: "56776"
id: 1658
display_name: "Glaucopsyche lygdamus"
}
item {
name: "127669"
id: 1659
display_name: "Jadera haematoloma"
}
item {
name: "4793"
id: 1660
display_name: "Charadrius vociferus"
}
item {
name: "209594"
id: 1661
display_name: "Scantius aegyptius"
}
item {
name: "4795"
id: 1662
display_name: "Charadrius wilsonia"
}
item {
name: "48586"
id: 1663
display_name: "Cepaea nemoralis"
}
item {
name: "4798"
id: 1664
display_name: "Charadrius melodus"
}
item {
name: "12992"
id: 1665
display_name: "Phoenicurus phoenicurus"
}
item {
name: "45763"
id: 1666
display_name: "Ondatra zibethicus"
}
item {
name: "119492"
id: 1667
display_name: "Smerinthus cerisyi"
}
item {
name: "13000"
id: 1668
display_name: "Phoenicurus ochruros"
}
item {
name: "4811"
id: 1669
display_name: "Charadrius dubius"
}
item {
name: "64973"
id: 1670
display_name: "Anaxyrus cognatus"
}
item {
name: "2168"
id: 1671
display_name: "Eumomota superciliosa"
}
item {
name: "6980"
id: 1672
display_name: "Anas querquedula"
}
item {
name: "64975"
id: 1673
display_name: "Anaxyrus debilis"
}
item {
name: "43130"
id: 1674
display_name: "Lepus californicus"
}
item {
name: "67707"
id: 1675
display_name: "Argiope aurantia"
}
item {
name: "4836"
id: 1676
display_name: "Himantopus mexicanus"
}
item {
name: "4838"
id: 1677
display_name: "Haematopus bachmani"
}
item {
name: "43132"
id: 1678
display_name: "Lepus americanus"
}
item {
name: "144106"
id: 1679
display_name: "Pica pica"
}
item {
name: "4843"
id: 1680
display_name: "Haematopus ostralegus"
}
item {
name: "67709"
id: 1681
display_name: "Antrodiaetus riversi"
}
item {
name: "4848"
id: 1682
display_name: "Haematopus unicolor"
}
item {
name: "4857"
id: 1683
display_name: "Vanellus vanellus"
}
item {
name: "29435"
id: 1684
display_name: "Coluber flagellum testaceus"
}
item {
name: "119550"
id: 1685
display_name: "Feltia jaculifera"
}
item {
name: "4866"
id: 1686
display_name: "Vanellus spinosus"
}
item {
name: "4870"
id: 1687
display_name: "Vanellus armatus"
}
item {
name: "54024"
id: 1688
display_name: "Satyrium californica"
}
item {
name: "13071"
id: 1689
display_name: "Luscinia svecica"
}
item {
name: "3544"
id: 1690
display_name: "Columbina inca"
}
item {
name: "4883"
id: 1691
display_name: "Recurvirostra avosetta"
}
item {
name: "204701"
id: 1692
display_name: "Melanchra adjuncta"
}
item {
name: "56083"
id: 1693
display_name: "Armadillidium vulgare"
}
item {
name: "981"
id: 1694
display_name: "Phasianus colchicus"
}
item {
name: "4893"
id: 1695
display_name: "Pluvialis dominica"
}
item {
name: "103200"
id: 1696
display_name: "Hypsiglena jani"
}
item {
name: "127777"
id: 1697
display_name: "Vespula vulgaris"
}
item {
name: "7643"
id: 1698
display_name: "Cinclus mexicanus"
}
item {
name: "13094"
id: 1699
display_name: "Erithacus rubecula"
}
item {
name: "41777"
id: 1700
display_name: "Lontra canadensis"
}
item {
name: "64988"
id: 1701
display_name: "Anaxyrus terrestris"
}
item {
name: "18167"
id: 1702
display_name: "Melanerpes aurifrons"
}
item {
name: "54064"
id: 1703
display_name: "Polygonia comma"
}
item {
name: "209713"
id: 1704
display_name: "Phigalia titea"
}
item {
name: "54068"
id: 1705
display_name: "Boloria selene"
}
item {
name: "104585"
id: 1706
display_name: "Libellula semifasciata"
}
item {
name: "119608"
id: 1707
display_name: "Theba pisana"
}
item {
name: "4801"
id: 1708
display_name: "Charadrius hiaticula"
}
item {
name: "104586"
id: 1709
display_name: "Libellula vibrans"
}
item {
name: "4935"
id: 1710
display_name: "Egretta gularis"
}
item {
name: "4937"
id: 1711
display_name: "Egretta caerulea"
}
item {
name: "4938"
id: 1712
display_name: "Egretta tricolor"
}
item {
name: "4940"
id: 1713
display_name: "Egretta thula"
}
item {
name: "340813"
id: 1714
display_name: "Hyalymenus tarsatus"
}
item {
name: "4943"
id: 1715
display_name: "Egretta garzetta"
}
item {
name: "4947"
id: 1716
display_name: "Egretta sacra"
}
item {
name: "13141"
id: 1717
display_name: "Monticola solitarius"
}
item {
name: "4952"
id: 1718
display_name: "Ardea cocoi"
}
item {
name: "4954"
id: 1719
display_name: "Ardea cinerea"
}
item {
name: "67727"
id: 1720
display_name: "Aeshna umbrosa"
}
item {
name: "4956"
id: 1721
display_name: "Ardea herodias"
}
item {
name: "144223"
id: 1722
display_name: "Chlosyne theona"
}
item {
name: "201568"
id: 1723
display_name: "Diabrotica undecimpunctata undecimpunctata"
}
item {
name: "47383"
id: 1724
display_name: "Latrodectus geometricus"
}
item {
name: "119664"
id: 1725
display_name: "Cacyreus marshalli"
}
item {
name: "62321"
id: 1726
display_name: "Rutpela maculata"
}
item {
name: "217970"
id: 1727
display_name: "Cyclophora pendulinaria"
}
item {
name: "4981"
id: 1728
display_name: "Nycticorax nycticorax"
}
item {
name: "12714"
id: 1729
display_name: "Turdus rufopalliatus"
}
item {
name: "4994"
id: 1730
display_name: "Ardeola ralloides"
}
item {
name: "4999"
id: 1731
display_name: "Nyctanassa violacea"
}
item {
name: "37769"
id: 1732
display_name: "Plestiodon skiltonianus"
}
item {
name: "213826"
id: 1733
display_name: "Apamea amputatrix"
}
item {
name: "67736"
id: 1734
display_name: "Rhionaeschna californica"
}
item {
name: "155380"
id: 1735
display_name: "Andricus crystallinus"
}
item {
name: "144280"
id: 1736
display_name: "Aramides cajaneus"
}
item {
name: "5017"
id: 1737
display_name: "Bubulcus ibis"
}
item {
name: "5020"
id: 1738
display_name: "Butorides virescens"
}
item {
name: "144285"
id: 1739
display_name: "Porphyrio martinicus"
}
item {
name: "81729"
id: 1740
display_name: "Feniseca tarquinius"
}
item {
name: "127905"
id: 1741
display_name: "Bombus ternarius"
}
item {
name: "5034"
id: 1742
display_name: "Botaurus lentiginosus"
}
item {
name: "29330"
id: 1743
display_name: "Nerodia erythrogaster transversa"
}
item {
name: "5036"
id: 1744
display_name: "Cochlearius cochlearius"
}
item {
name: "46001"
id: 1745
display_name: "Sciurus vulgaris"
}
item {
name: "46005"
id: 1746
display_name: "Sciurus variegatoides"
}
item {
name: "127928"
id: 1747
display_name: "Autochton cellus"
}
item {
name: "340923"
id: 1748
display_name: "Scolypopa australis"
}
item {
name: "46017"
id: 1749
display_name: "Sciurus carolinensis"
}
item {
name: "46018"
id: 1750
display_name: "Sciurus aberti"
}
item {
name: "447427"
id: 1751
display_name: "Neverita lewisii"
}
item {
name: "46020"
id: 1752
display_name: "Sciurus niger"
}
item {
name: "5061"
id: 1753
display_name: "Anhinga novaehollandiae"
}
item {
name: "46023"
id: 1754
display_name: "Sciurus griseus"
}
item {
name: "122375"
id: 1755
display_name: "Carterocephalus palaemon"
}
item {
name: "5066"
id: 1756
display_name: "Anhinga rufa"
}
item {
name: "145289"
id: 1757
display_name: "Melozone fusca"
}
item {
name: "5074"
id: 1758
display_name: "Aquila chrysaetos"
}
item {
name: "49998"
id: 1759
display_name: "Thamnophis sirtalis infernalis"
}
item {
name: "13270"
id: 1760
display_name: "Hylocichla mustelina"
}
item {
name: "62423"
id: 1761
display_name: "Cimbex americana"
}
item {
name: "62424"
id: 1762
display_name: "Sitochroa palealis"
}
item {
name: "111578"
id: 1763
display_name: "Regina grahamii"
}
item {
name: "144207"
id: 1764
display_name: "Aphelocoma wollweberi"
}
item {
name: "62429"
id: 1765
display_name: "Pyronia tithonus"
}
item {
name: "47934"
id: 1766
display_name: "Libellula luctuosa"
}
item {
name: "50000"
id: 1767
display_name: "Clemmys guttata"
}
item {
name: "5097"
id: 1768
display_name: "Accipiter striatus"
}
item {
name: "119789"
id: 1769
display_name: "Cisseps fulvicollis"
}
item {
name: "5106"
id: 1770
display_name: "Accipiter nisus"
}
item {
name: "5108"
id: 1771
display_name: "Accipiter gentilis"
}
item {
name: "62456"
id: 1772
display_name: "Rhagonycha fulva"
}
item {
name: "4948"
id: 1773
display_name: "Egretta rufescens"
}
item {
name: "46082"
id: 1774
display_name: "Marmota marmota"
}
item {
name: "6990"
id: 1775
display_name: "Bucephala clangula"
}
item {
name: "4535"
id: 1776
display_name: "Anous stolidus"
}
item {
name: "46087"
id: 1777
display_name: "Marmota caligata"
}
item {
name: "72458"
id: 1778
display_name: "Actitis macularius"
}
item {
name: "4951"
id: 1779
display_name: "Ardea purpurea"
}
item {
name: "128012"
id: 1780
display_name: "Eumorpha fasciatus"
}
item {
name: "472078"
id: 1781
display_name: "Todiramphus chloris"
}
item {
name: "46095"
id: 1782
display_name: "Marmota monax"
}
item {
name: "34"
id: 1783
display_name: "Grus americana"
}
item {
name: "4835"
id: 1784
display_name: "Himantopus himantopus"
}
item {
name: "122374"
id: 1785
display_name: "Eurema mexicana"
}
item {
name: "19812"
id: 1786
display_name: "Glaucidium gnoma"
}
item {
name: "73823"
id: 1787
display_name: "Hierophis viridiflavus"
}
item {
name: "5168"
id: 1788
display_name: "Circus approximans"
}
item {
name: "143110"
id: 1789
display_name: "Hypagyrtis unipunctata"
}
item {
name: "65976"
id: 1790
display_name: "Lithobates blairi"
}
item {
name: "5173"
id: 1791
display_name: "Circus aeruginosus"
}
item {
name: "54327"
id: 1792
display_name: "Vespa crabro"
}
item {
name: "4273"
id: 1793
display_name: "Phalacrocorax sulcirostris"
}
item {
name: "5180"
id: 1794
display_name: "Buteo albonotatus"
}
item {
name: "103485"
id: 1795
display_name: "Ischnura denticollis"
}
item {
name: "62528"
id: 1796
display_name: "Butorides striata"
}
item {
name: "62529"
id: 1797
display_name: "Platalea ajaja"
}
item {
name: "5186"
id: 1798
display_name: "Buteo brachyurus"
}
item {
name: "103494"
id: 1799
display_name: "Ischnura hastata"
}
item {
name: "144455"
id: 1800
display_name: "Ardea alba"
}
item {
name: "103497"
id: 1801
display_name: "Ischnura perparva"
}
item {
name: "103498"
id: 1802
display_name: "Ischnura posita"
}
item {
name: "5196"
id: 1803
display_name: "Buteo swainsoni"
}
item {
name: "128079"
id: 1804
display_name: "Grammia ornata"
}
item {
name: "29777"
id: 1805
display_name: "Lampropeltis triangulum"
}
item {
name: "867"
id: 1806
display_name: "Alectoris rufa"
}
item {
name: "5206"
id: 1807
display_name: "Buteo lineatus"
}
item {
name: "29783"
id: 1808
display_name: "Lampropeltis triangulum triangulum"
}
item {
name: "122383"
id: 1809
display_name: "Plebejus melissa"
}
item {
name: "5212"
id: 1810
display_name: "Buteo jamaicensis"
}
item {
name: "81495"
id: 1811
display_name: "Libellula pulchella"
}
item {
name: "35003"
id: 1812
display_name: "Heloderma suspectum"
}
item {
name: "46180"
id: 1813
display_name: "Cynomys gunnisoni"
}
item {
name: "144485"
id: 1814
display_name: "Charadrius nivosus"
}
item {
name: "144490"
id: 1815
display_name: "Tringa incana"
}
item {
name: "144491"
id: 1816
display_name: "Tringa semipalmata"
}
item {
name: "25185"
id: 1817
display_name: "Hypopachus variolosus"
}
item {
name: "5231"
id: 1818
display_name: "Terathopius ecaudatus"
}
item {
name: "144496"
id: 1819
display_name: "Gallinago delicata"
}
item {
name: "5233"
id: 1820
display_name: "Buteogallus anthracinus"
}
item {
name: "211035"
id: 1821
display_name: "Speranza pustularia"
}
item {
name: "29813"
id: 1822
display_name: "Lampropeltis getula"
}
item {
name: "144502"
id: 1823
display_name: "Chroicocephalus philadelphia"
}
item {
name: "5242"
id: 1824
display_name: "Circaetus gallicus"
}
item {
name: "144507"
id: 1825
display_name: "Chroicocephalus novaehollandiae"
}
item {
name: "144510"
id: 1826
display_name: "Chroicocephalus ridibundus"
}
item {
name: "52757"
id: 1827
display_name: "Polistes fuscatus"
}
item {
name: "144514"
id: 1828
display_name: "Leucophaeus atricilla"
}
item {
name: "144515"
id: 1829
display_name: "Leucophaeus pipixcan"
}
item {
name: "46217"
id: 1830
display_name: "Tamias striatus"
}
item {
name: "144525"
id: 1831
display_name: "Onychoprion fuscatus"
}
item {
name: "46222"
id: 1832
display_name: "Tamias minimus"
}
item {
name: "144530"
id: 1833
display_name: "Sternula antillarum"
}
item {
name: "46230"
id: 1834
display_name: "Tamias merriami"
}
item {
name: "144537"
id: 1835
display_name: "Hydroprogne caspia"
}
item {
name: "144539"
id: 1836
display_name: "Thalasseus maximus"
}
item {
name: "144540"
id: 1837
display_name: "Thalasseus bergii"
}
item {
name: "5277"
id: 1838
display_name: "Elanus leucurus"
}
item {
name: "324766"
id: 1839
display_name: "Epicallima argenticinctella"
}
item {
name: "72486"
id: 1840
display_name: "Alopochen aegyptiaca"
}
item {
name: "62229"
id: 1841
display_name: "Ischnura cervula"
}
item {
name: "144550"
id: 1842
display_name: "Streptopelia senegalensis"
}
item {
name: "46256"
id: 1843
display_name: "Ammospermophilus harrisii"
}
item {
name: "94559"
id: 1844
display_name: "Argia nahuana"
}
item {
name: "46259"
id: 1845
display_name: "Tamiasciurus douglasii"
}
item {
name: "46260"
id: 1846
display_name: "Tamiasciurus hudsonicus"
}
item {
name: "119989"
id: 1847
display_name: "Stagmomantis carolina"
}
item {
name: "13494"
id: 1848
display_name: "Gerygone igata"
}
item {
name: "5305"
id: 1849
display_name: "Haliaeetus leucocephalus"
}
item {
name: "7596"
id: 1850
display_name: "Cistothorus platensis"
}
item {
name: "5308"
id: 1851
display_name: "Haliaeetus vocifer"
}
item {
name: "218301"
id: 1852
display_name: "Diacme elealis"
}
item {
name: "95422"
id: 1853
display_name: "Basiaeschna janata"
}
item {
name: "46272"
id: 1854
display_name: "Glaucomys volans"
}
item {
name: "120010"
id: 1855
display_name: "Polistes metricus"
}
item {
name: "144594"
id: 1856
display_name: "Bubo scandiacus"
}
item {
name: "52771"
id: 1857
display_name: "Gonepteryx rhamni"
}
item {
name: "144597"
id: 1858
display_name: "Ciccaba virgata"
}
item {
name: "890"
id: 1859
display_name: "Bonasa umbellus"
}
item {
name: "52773"
id: 1860
display_name: "Poanes zabulon"
}
item {
name: "120033"
id: 1861
display_name: "Lapara bombycoides"
}
item {
name: "5346"
id: 1862
display_name: "Busarellus nigricollis"
}
item {
name: "5349"
id: 1863
display_name: "Rostrhamus sociabilis"
}
item {
name: "36391"
id: 1864
display_name: "Anolis equestris"
}
item {
name: "46316"
id: 1865
display_name: "Trichechus manatus"
}
item {
name: "5267"
id: 1866
display_name: "Milvus milvus"
}
item {
name: "128241"
id: 1867
display_name: "Darapsa choerilus"
}
item {
name: "128242"
id: 1868
display_name: "Palthis angulalis"
}
item {
name: "5366"
id: 1869
display_name: "Gyps fulvus"
}
item {
name: "204512"
id: 1870
display_name: "Ficedula hypoleuca"
}
item {
name: "54526"
id: 1871
display_name: "Crassadoma gigantea"
}
item {
name: "144642"
id: 1872
display_name: "Momotus coeruliceps"
}
item {
name: "120070"
id: 1873
display_name: "Strongylocentrotus droebachiensis"
}
item {
name: "54538"
id: 1874
display_name: "Syngnathus leptorhynchus"
}
item {
name: "81746"
id: 1875
display_name: "Necrophila americana"
}
item {
name: "300301"
id: 1876
display_name: "Pseudomyrmex gracilis"
}
item {
name: "202003"
id: 1877
display_name: "Apiomerus spissipes"
}
item {
name: "41860"
id: 1878
display_name: "Enhydra lutris"
}
item {
name: "4817"
id: 1879
display_name: "Charadrius semipalmatus"
}
item {
name: "36145"
id: 1880
display_name: "Sceloporus variabilis"
}
item {
name: "202012"
id: 1881
display_name: "Steatoda capensis"
}
item {
name: "62749"
id: 1882
display_name: "Iphiclides podalirius"
}
item {
name: "5406"
id: 1883
display_name: "Haliastur indus"
}
item {
name: "62751"
id: 1884
display_name: "Andricus kingi"
}
item {
name: "5363"
id: 1885
display_name: "Gyps africanus"
}
item {
name: "5416"
id: 1886
display_name: "Ictinia mississippiensis"
}
item {
name: "62766"
id: 1887
display_name: "Issoria lathonia"
}
item {
name: "62768"
id: 1888
display_name: "Scolia dubia"
}
item {
name: "126206"
id: 1889
display_name: "Dissosteira carolina"
}
item {
name: "269875"
id: 1890
display_name: "Mallodon dasystomus"
}
item {
name: "155030"
id: 1891
display_name: "Limenitis reducta"
}
item {
name: "62345"
id: 1892
display_name: "Duttaphrynus melanostictus"
}
item {
name: "52519"
id: 1893
display_name: "Aeshna cyanea"
}
item {
name: "10001"
id: 1894
display_name: "Dives dives"
}
item {
name: "460365"
id: 1895
display_name: "Tegula funebralis"
}
item {
name: "13631"
id: 1896
display_name: "Baeolophus atricristatus"
}
item {
name: "13632"
id: 1897
display_name: "Baeolophus bicolor"
}
item {
name: "13633"
id: 1898
display_name: "Baeolophus inornatus"
}
item {
name: "9100"
id: 1899
display_name: "Melospiza melodia"
}
item {
name: "62796"
id: 1900
display_name: "Crotaphytus bicinctores"
}
item {
name: "62797"
id: 1901
display_name: "Gambelia wislizenii"
}
item {
name: "46009"
id: 1902
display_name: "Sciurus aureogaster"
}
item {
name: "112867"
id: 1903
display_name: "Sparisoma viride"
}
item {
name: "70997"
id: 1904
display_name: "Pelecinus polyturator"
}
item {
name: "62806"
id: 1905
display_name: "Mytilus californianus"
}
item {
name: "120156"
id: 1906
display_name: "Musca domestica"
}
item {
name: "136548"
id: 1907
display_name: "Euclea delphinii"
}
item {
name: "50065"
id: 1908
display_name: "Danaus eresimus"
}
item {
name: "43239"
id: 1909
display_name: "Tachyglossus aculeatus"
}
item {
name: "145303"
id: 1910
display_name: "Spinus spinus"
}
item {
name: "120183"
id: 1911
display_name: "Araneus marmoreus"
}
item {
name: "71032"
id: 1912
display_name: "Crotalus scutulatus scutulatus"
}
item {
name: "71034"
id: 1913
display_name: "Tenodera sinensis"
}
item {
name: "143121"
id: 1914
display_name: "Ochropleura implecta"
}
item {
name: "13695"
id: 1915
display_name: "Motacilla alba"
}
item {
name: "7458"
id: 1916
display_name: "Certhia americana"
}
item {
name: "38293"
id: 1917
display_name: "Lampropholis delicata"
}
item {
name: "144281"
id: 1918
display_name: "Bucorvus leadbeateri"
}
item {
name: "120217"
id: 1919
display_name: "Halysidota tessellaris"
}
item {
name: "226718"
id: 1920
display_name: "Otiorhynchus sulcatus"
}
item {
name: "464287"
id: 1921
display_name: "Anteaeolidiella oliviae"
}
item {
name: "226720"
id: 1922
display_name: "Oxychilus draparnaudi"
}
item {
name: "13729"
id: 1923
display_name: "Anthus pratensis"
}
item {
name: "13732"
id: 1924
display_name: "Anthus rubescens"
}
item {
name: "11930"
id: 1925
display_name: "Tachycineta albilinea"
}
item {
name: "71085"
id: 1926
display_name: "Varanus niloticus"
}
item {
name: "144814"
id: 1927
display_name: "Poecile carolinensis"
}
item {
name: "144815"
id: 1928
display_name: "Poecile atricapillus"
}
item {
name: "144816"
id: 1929
display_name: "Poecile gambeli"
}
item {
name: "144820"
id: 1930
display_name: "Poecile rufescens"
}
item {
name: "144823"
id: 1931
display_name: "Periparus ater"
}
item {
name: "10485"
id: 1932
display_name: "Chlorophanes spiza"
}
item {
name: "40523"
id: 1933
display_name: "Lasiurus cinereus"
}
item {
name: "47719"
id: 1934
display_name: "Datana ministra"
}
item {
name: "13770"
id: 1935
display_name: "Estrilda astrild"
}
item {
name: "144849"
id: 1936
display_name: "Cyanistes caeruleus"
}
item {
name: "218587"
id: 1937
display_name: "Discus rotundatus"
}
item {
name: "47105"
id: 1938
display_name: "Tamandua mexicana"
}
item {
name: "18463"
id: 1939
display_name: "Sphyrapicus varius"
}
item {
name: "11858"
id: 1940
display_name: "Petrochelidon pyrrhonota"
}
item {
name: "144882"
id: 1941
display_name: "Troglodytes pacificus"
}
item {
name: "144883"
id: 1942
display_name: "Troglodytes hiemalis"
}
item {
name: "153076"
id: 1943
display_name: "Nephelodes minians"
}
item {
name: "62978"
id: 1944
display_name: "Chlosyne nycteis"
}
item {
name: "128517"
id: 1945
display_name: "Catocala ilia"
}
item {
name: "153102"
id: 1946
display_name: "Dysphania militaris"
}
item {
name: "59651"
id: 1947
display_name: "Aquarius remigis"
}
item {
name: "13851"
id: 1948
display_name: "Passer montanus"
}
item {
name: "13858"
id: 1949
display_name: "Passer domesticus"
}
item {
name: "39742"
id: 1950
display_name: "Kinosternon flavescens"
}
item {
name: "506118"
id: 1951
display_name: "Aphelocoma californica"
}
item {
name: "5672"
id: 1952
display_name: "Amazilia yucatanensis"
}
item {
name: "5676"
id: 1953
display_name: "Amazilia tzacatl"
}
item {
name: "204503"
id: 1954
display_name: "Dicrurus adsimilis"
}
item {
name: "52785"
id: 1955
display_name: "Megachile sculpturalis"
}
item {
name: "126905"
id: 1956
display_name: "Harrisina americana"
}
item {
name: "55773"
id: 1957
display_name: "Promachus hinei"
}
item {
name: "84752"
id: 1958
display_name: "Microcentrum rhombifolium"
}
item {
name: "5698"
id: 1959
display_name: "Amazilia violiceps"
}
item {
name: "145539"
id: 1960
display_name: "Ovis canadensis nelsoni"
}
item {
name: "104004"
id: 1961
display_name: "Lampropeltis splendida"
}
item {
name: "13893"
id: 1962
display_name: "Lonchura punctulata"
}
item {
name: "63048"
id: 1963
display_name: "Nuttallina californica"
}
item {
name: "226901"
id: 1964
display_name: "Panopoda rufimargo"
}
item {
name: "194134"
id: 1965
display_name: "Anthanassa tulcis"
}
item {
name: "5049"
id: 1966
display_name: "Tigrisoma mexicanum"
}
item {
name: "407130"
id: 1967
display_name: "Porphyrio melanotus melanotus"
}
item {
name: "226910"
id: 1968
display_name: "Panthea furcilla"
}
item {
name: "130661"
id: 1969
display_name: "Catasticta nimbice"
}
item {
name: "120215"
id: 1970
display_name: "Bombus griseocollis"
}
item {
name: "144220"
id: 1971
display_name: "Melanitta americana"
}
item {
name: "9148"
id: 1972
display_name: "Spizella pallida"
}
item {
name: "320610"
id: 1973
display_name: "Sceloporus magister"
}
item {
name: "54900"
id: 1974
display_name: "Papilio polyxenes asterius"
}
item {
name: "36080"
id: 1975
display_name: "Callisaurus draconoides"
}
item {
name: "5758"
id: 1976
display_name: "Amazilia rutila"
}
item {
name: "3465"
id: 1977
display_name: "Zenaida aurita"
}
item {
name: "116461"
id: 1978
display_name: "Anolis sagrei"
}
item {
name: "61295"
id: 1979
display_name: "Aporia crataegi"
}
item {
name: "131673"
id: 1980
display_name: "Tetracis cachexiata"
}
item {
name: "63113"
id: 1981
display_name: "Blarina brevicauda"
}
item {
name: "26904"
id: 1982
display_name: "Coronella austriaca"
}
item {
name: "94575"
id: 1983
display_name: "Argia tibialis"
}
item {
name: "237166"
id: 1984
display_name: "Lycaena phlaeas hypophlaeas"
}
item {
name: "129305"
id: 1985
display_name: "Melanoplus bivittatus"
}
item {
name: "63128"
id: 1986
display_name: "Speyeria atlantis"
}
item {
name: "113514"
id: 1987
display_name: "Sympetrum internum"
}
item {
name: "48757"
id: 1988
display_name: "Echinothrix calamaris"
}
item {
name: "128670"
id: 1989
display_name: "Bombus vagans"
}
item {
name: "13988"
id: 1990
display_name: "Prunella modularis"
}
item {
name: "54951"
id: 1991
display_name: "Anartia fatima"
}
item {
name: "54952"
id: 1992
display_name: "Cardisoma guanhumi"
}
item {
name: "325295"
id: 1993
display_name: "Cydalima perspectalis"
}
item {
name: "63160"
id: 1994
display_name: "Celithemis elisa"
}
item {
name: "210615"
id: 1995
display_name: "Pyrausta volupialis"
}
item {
name: "472766"
id: 1996
display_name: "Falco tinnunculus"
}
item {
name: "29927"
id: 1997
display_name: "Heterodon nasicus"
}
item {
name: "145088"
id: 1998
display_name: "Ixoreus naevius"
}
item {
name: "6432"
id: 1999
display_name: "Archilochus colubris"
}
item {
name: "5827"
id: 2000
display_name: "Lampornis clemenciae"
}
item {
name: "15990"
id: 2001
display_name: "Myiarchus tuberculifer"
}
item {
name: "128712"
id: 2002
display_name: "Coccinella californica"
}
item {
name: "67559"
id: 2003
display_name: "Adelpha eulalia"
}
item {
name: "128719"
id: 2004
display_name: "Echinometra mathaei"
}
item {
name: "10247"
id: 2005
display_name: "Setophaga ruticilla"
}
item {
name: "202451"
id: 2006
display_name: "Copaeodes minima"
}
item {
name: "95958"
id: 2007
display_name: "Boyeria vinosa"
}
item {
name: "16016"
id: 2008
display_name: "Myiarchus tyrannulus"
}
item {
name: "36202"
id: 2009
display_name: "Sceloporus olivaceus"
}
item {
name: "95982"
id: 2010
display_name: "Brachymesia furcata"
}
item {
name: "126589"
id: 2011
display_name: "Calycopis isobeon"
}
item {
name: "120578"
id: 2012
display_name: "Micrathena sagittata"
}
item {
name: "194690"
id: 2013
display_name: "Pogonomyrmex barbatus"
}
item {
name: "120583"
id: 2014
display_name: "Parasteatoda tepidariorum"
}
item {
name: "202505"
id: 2015
display_name: "Zosterops lateralis"
}
item {
name: "38671"
id: 2016
display_name: "Aspidoscelis tigris"
}
item {
name: "38672"
id: 2017
display_name: "Aspidoscelis tigris stejnegeri"
}
item {
name: "9176"
id: 2018
display_name: "Zonotrichia leucophrys"
}
item {
name: "120596"
id: 2019
display_name: "Aphonopelma hentzi"
}
item {
name: "9744"
id: 2020
display_name: "Agelaius phoeniceus"
}
item {
name: "38684"
id: 2021
display_name: "Aspidoscelis tigris mundus"
}
item {
name: "62426"
id: 2022
display_name: "Aphantopus hyperantus"
}
item {
name: "30494"
id: 2023
display_name: "Micrurus tener"
}
item {
name: "58578"
id: 2024
display_name: "Euphydryas phaeton"
}
item {
name: "96036"
id: 2025
display_name: "Brechmorhoga mendax"
}
item {
name: "333608"
id: 2026
display_name: "Leukoma staminea"
}
item {
name: "38703"
id: 2027
display_name: "Aspidoscelis sexlineata sexlineata"
}
item {
name: "126600"
id: 2028
display_name: "Chortophaga viridifasciata"
}
item {
name: "63287"
id: 2029
display_name: "Megalorchestia californiana"
}
item {
name: "128824"
id: 2030
display_name: "Lucilia sericata"
}
item {
name: "104249"
id: 2031
display_name: "Lepisosteus oculatus"
}
item {
name: "203153"
id: 2032
display_name: "Parus major"
}
item {
name: "9183"
id: 2033
display_name: "Zonotrichia capensis"
}
item {
name: "82201"
id: 2034
display_name: "Hypena baltimoralis"
}
item {
name: "145217"
id: 2035
display_name: "Oreothlypis peregrina"
}
item {
name: "145218"
id: 2036
display_name: "Oreothlypis celata"
}
item {
name: "145221"
id: 2037
display_name: "Oreothlypis ruficapilla"
}
item {
name: "145224"
id: 2038
display_name: "Geothlypis philadelphia"
}
item {
name: "145225"
id: 2039
display_name: "Geothlypis formosa"
}
item {
name: "448331"
id: 2040
display_name: "Ambigolimax valentianus"
}
item {
name: "128845"
id: 2041
display_name: "Copestylum mexicanum"
}
item {
name: "145231"
id: 2042
display_name: "Setophaga tigrina"
}
item {
name: "145233"
id: 2043
display_name: "Setophaga americana"
}
item {
name: "145235"
id: 2044
display_name: "Setophaga magnolia"
}
item {
name: "145236"
id: 2045
display_name: "Setophaga castanea"
}
item {
name: "145237"
id: 2046
display_name: "Setophaga fusca"
}
item {
name: "145238"
id: 2047
display_name: "Setophaga petechia"
}
item {
name: "145240"
id: 2048
display_name: "Setophaga striata"
}
item {
name: "145242"
id: 2049
display_name: "Setophaga palmarum"
}
item {
name: "179855"
id: 2050
display_name: "Polites vibex"
}
item {
name: "145244"
id: 2051
display_name: "Setophaga pinus"
}
item {
name: "145245"
id: 2052
display_name: "Setophaga coronata"
}
item {
name: "145246"
id: 2053
display_name: "Setophaga dominica"
}
item {
name: "5987"
id: 2054
display_name: "Campylopterus hemileucurus"
}
item {
name: "17382"
id: 2055
display_name: "Vireo cassinii"
}
item {
name: "145254"
id: 2056
display_name: "Setophaga nigrescens"
}
item {
name: "145255"
id: 2057
display_name: "Setophaga townsendi"
}
item {
name: "145256"
id: 2058
display_name: "Setophaga occidentalis"
}
item {
name: "145257"
id: 2059
display_name: "Setophaga chrysoparia"
}
item {
name: "145258"
id: 2060
display_name: "Setophaga virens"
}
item {
name: "48786"
id: 2061
display_name: "Pollicipes polymerus"
}
item {
name: "36207"
id: 2062
display_name: "Sceloporus occidentalis longipes"
}
item {
name: "22392"
id: 2063
display_name: "Eleutherodactylus marnockii"
}
item {
name: "22393"
id: 2064
display_name: "Eleutherodactylus cystignathoides"
}
item {
name: "145275"
id: 2065
display_name: "Cardellina canadensis"
}
item {
name: "145277"
id: 2066
display_name: "Cardellina rubra"
}
item {
name: "7829"
id: 2067
display_name: "Aphelocoma coerulescens"
}
item {
name: "41963"
id: 2068
display_name: "Panthera pardus"
}
item {
name: "142998"
id: 2069
display_name: "Pyrausta acrionalis"
}
item {
name: "18204"
id: 2070
display_name: "Melanerpes erythrocephalus"
}
item {
name: "47425"
id: 2071
display_name: "Tonicella lineata"
}
item {
name: "148460"
id: 2072
display_name: "Charadra deridens"
}
item {
name: "145291"
id: 2073
display_name: "Emberiza calandra"
}
item {
name: "52523"
id: 2074
display_name: "Carcinus maenas"
}
item {
name: "46994"
id: 2075
display_name: "Scapanus latimanus"
}
item {
name: "114314"
id: 2076
display_name: "Tramea onusta"
}
item {
name: "145300"
id: 2077
display_name: "Acanthis flammea"
}
item {
name: "63382"
id: 2078
display_name: "Dermasterias imbricata"
}
item {
name: "126772"
id: 2079
display_name: "Ursus americanus californiensis"
}
item {
name: "145304"
id: 2080
display_name: "Spinus pinus"
}
item {
name: "10294"
id: 2081
display_name: "Thraupis abbas"
}
item {
name: "145308"
id: 2082
display_name: "Spinus psaltria"
}
item {
name: "145309"
id: 2083
display_name: "Spinus lawrencei"
}
item {
name: "145310"
id: 2084
display_name: "Spinus tristis"
}
item {
name: "3739"
id: 2085
display_name: "Threskiornis aethiopicus"
}
item {
name: "47014"
id: 2086
display_name: "Scalopus aquaticus"
}
item {
name: "4566"
id: 2087
display_name: "Gygis alba"
}
item {
name: "43335"
id: 2088
display_name: "Equus quagga"
}
item {
name: "41970"
id: 2089
display_name: "Panthera onca"
}
item {
name: "128950"
id: 2090
display_name: "Lycomorpha pholus"
}
item {
name: "11935"
id: 2091
display_name: "Tachycineta bicolor"
}
item {
name: "333759"
id: 2092
display_name: "Larus dominicanus dominicanus"
}
item {
name: "143008"
id: 2093
display_name: "Herpetogramma pertextalis"
}
item {
name: "235341"
id: 2094
display_name: "Coenonympha tullia california"
}
item {
name: "44705"
id: 2095
display_name: "Mus musculus"
}
item {
name: "145352"
id: 2096
display_name: "Lonchura oryzivora"
}
item {
name: "4840"
id: 2097
display_name: "Haematopus palliatus"
}
item {
name: "244845"
id: 2098
display_name: "Apiomerus californicus"
}
item {
name: "145360"
id: 2099
display_name: "Chloris chloris"
}
item {
name: "5112"
id: 2100
display_name: "Accipiter cooperii"
}
item {
name: "30675"
id: 2101
display_name: "Agkistrodon piscivorus"
}
item {
name: "341972"
id: 2102
display_name: "Crocodylus niloticus"
}
item {
name: "30677"
id: 2103
display_name: "Agkistrodon piscivorus conanti"
}
item {
name: "30678"
id: 2104
display_name: "Agkistrodon contortrix"
}
item {
name: "52900"
id: 2105
display_name: "Caenurgina crassiuscula"
}
item {
name: "30682"
id: 2106
display_name: "Agkistrodon contortrix laticinctus"
}
item {
name: "47067"
id: 2107
display_name: "Bradypus variegatus"
}
item {
name: "55260"
id: 2108
display_name: "Erythemis vesiculosa"
}
item {
name: "17402"
id: 2109
display_name: "Vireo solitarius"
}
item {
name: "6369"
id: 2110
display_name: "Selasphorus platycercus"
}
item {
name: "104416"
id: 2111
display_name: "Lestes alacer"
}
item {
name: "128993"
id: 2112
display_name: "Narceus annularus"
}
item {
name: "104422"
id: 2113
display_name: "Lestes congener"
}
item {
name: "227307"
id: 2114
display_name: "Patalene olyzonaria"
}
item {
name: "104429"
id: 2115
display_name: "Lestes dryas"
}
item {
name: "194542"
id: 2116
display_name: "Phyciodes graphica"
}
item {
name: "52904"
id: 2117
display_name: "Microcrambus elegans"
}
item {
name: "129363"
id: 2118
display_name: "Calephelis nemesis"
}
item {
name: "144506"
id: 2119
display_name: "Chroicocephalus scopulinus"
}
item {
name: "30713"
id: 2120
display_name: "Crotalus oreganus helleri"
}
item {
name: "47101"
id: 2121
display_name: "Choloepus hoffmanni"
}
item {
name: "210942"
id: 2122
display_name: "Caedicia simplex"
}
item {
name: "30719"
id: 2123
display_name: "Crotalus scutulatus"
}
item {
name: "30724"
id: 2124
display_name: "Crotalus ruber"
}
item {
name: "47110"
id: 2125
display_name: "Triopha maculata"
}
item {
name: "4235"
id: 2126
display_name: "Aechmophorus occidentalis"
}
item {
name: "30731"
id: 2127
display_name: "Crotalus molossus"
}
item {
name: "30733"
id: 2128
display_name: "Crotalus molossus nigrescens"
}
item {
name: "30735"
id: 2129
display_name: "Crotalus mitchellii"
}
item {
name: "30740"
id: 2130
display_name: "Crotalus lepidus"
}
item {
name: "30746"
id: 2131
display_name: "Crotalus horridus"
}
item {
name: "63518"
id: 2132
display_name: "Melanoplus differentialis"
}
item {
name: "30751"
id: 2133
display_name: "Crotalus cerastes"
}
item {
name: "126640"
id: 2134
display_name: "Caenurgina erechtea"
}
item {
name: "46086"
id: 2135
display_name: "Marmota flaviventris"
}
item {
name: "194599"
id: 2136
display_name: "Heliomata cycladata"
}
item {
name: "30764"
id: 2137
display_name: "Crotalus atrox"
}
item {
name: "204520"
id: 2138
display_name: "Hemiphaga novaeseelandiae"
}
item {
name: "128141"
id: 2139
display_name: "Crepidula adunca"
}
item {
name: "121183"
id: 2140
display_name: "Mythimna unipuncta"
}
item {
name: "40827"
id: 2141
display_name: "Eidolon helvum"
}
item {
name: "4571"
id: 2142
display_name: "Xema sabini"
}
item {
name: "211007"
id: 2143
display_name: "Nepytia canosaria"
}
item {
name: "47171"
id: 2144
display_name: "Flabellina iodinea"
}
item {
name: "211012"
id: 2145
display_name: "Maliattha synochitis"
}
item {
name: "30798"
id: 2146
display_name: "Bothrops asper"
}
item {
name: "47188"
id: 2147
display_name: "Pachygrapsus crassipes"
}
item {
name: "55387"
id: 2148
display_name: "Esox lucius"
}
item {
name: "58583"
id: 2149
display_name: "Limenitis arthemis arthemis"
}
item {
name: "104548"
id: 2150
display_name: "Leucorrhinia frigida"
}
item {
name: "104550"
id: 2151
display_name: "Leucorrhinia hudsonica"
}
item {
name: "104551"
id: 2152
display_name: "Leucorrhinia intacta"
}
item {
name: "47209"
id: 2153
display_name: "Hermissenda crassicornis"
}
item {
name: "55655"
id: 2154
display_name: "Lycaena phlaeas"
}
item {
name: "202861"
id: 2155
display_name: "Otala lactea"
}
item {
name: "143037"
id: 2156
display_name: "Lineodes integra"
}
item {
name: "47219"
id: 2157
display_name: "Apis mellifera"
}
item {
name: "24254"
id: 2158
display_name: "Pseudacris cadaverina"
}
item {
name: "47226"
id: 2159
display_name: "Papilio rutulus"
}
item {
name: "104572"
id: 2160
display_name: "Libellula comanche"
}
item {
name: "104574"
id: 2161
display_name: "Libellula croceipennis"
}
item {
name: "104575"
id: 2162
display_name: "Libellula cyanea"
}
item {
name: "145538"
id: 2163
display_name: "Ovis canadensis canadensis"
}
item {
name: "104580"
id: 2164
display_name: "Libellula incesta"
}
item {
name: "24257"
id: 2165
display_name: "Pseudacris streckeri"
}
item {
name: "53866"
id: 2166
display_name: "Calpodes ethlius"
}
item {
name: "18796"
id: 2167
display_name: "Ramphastos sulfuratus"
}
item {
name: "2413"
id: 2168
display_name: "Dacelo novaeguineae"
}
item {
name: "482"
id: 2169
display_name: "Fulica atra"
}
item {
name: "47251"
id: 2170
display_name: "Sphyraena barracuda"
}
item {
name: "358549"
id: 2171
display_name: "Hemaris diffinis"
}
item {
name: "81526"
id: 2172
display_name: "Crotalus viridis"
}
item {
name: "342169"
id: 2173
display_name: "Hirundo rustica erythrogaster"
}
item {
name: "39280"
id: 2174
display_name: "Leiocephalus carinatus"
}
item {
name: "47269"
id: 2175
display_name: "Dasyatis americana"
}
item {
name: "55467"
id: 2176
display_name: "Sabulodes aegrotata"
}
item {
name: "6316"
id: 2177
display_name: "Calypte costae"
}
item {
name: "6317"
id: 2178
display_name: "Calypte anna"
}
item {
name: "47280"
id: 2179
display_name: "Pterois volitans"
}
item {
name: "81608"
id: 2180
display_name: "Geukensia demissa"
}
item {
name: "121012"
id: 2181
display_name: "Euglandina rosea"
}
item {
name: "236980"
id: 2182
display_name: "Colaptes auratus cafer"
}
item {
name: "38673"
id: 2183
display_name: "Aspidoscelis tigris tigris"
}
item {
name: "3786"
id: 2184
display_name: "Sula nebouxii"
}
item {
name: "55487"
id: 2185
display_name: "Diabrotica undecimpunctata"
}
item {
name: "243904"
id: 2186
display_name: "Phrynosoma platyrhinos"
}
item {
name: "55489"
id: 2187
display_name: "Cycloneda munda"
}
item {
name: "204491"
id: 2188
display_name: "Copsychus saularis"
}
item {
name: "55492"
id: 2189
display_name: "Cycloneda polita"
}
item {
name: "129222"
id: 2190
display_name: "Heterophleps triguttaria"
}
item {
name: "129223"
id: 2191
display_name: "Pasiphila rectangulata"
}
item {
name: "28365"
id: 2192
display_name: "Thamnophis sirtalis sirtalis"
}
item {
name: "47316"
id: 2193
display_name: "Chaetodon lunula"
}
item {
name: "6359"
id: 2194
display_name: "Selasphorus sasin"
}
item {
name: "62500"
id: 2195
display_name: "Leptophobia aripa"
}
item {
name: "6363"
id: 2196
display_name: "Selasphorus rufus"
}
item {
name: "96480"
id: 2197
display_name: "Calopteryx aequabilis"
}
item {
name: "55521"
id: 2198
display_name: "Papilio eurymedon"
}
item {
name: "6371"
id: 2199
display_name: "Calothorax lucifer"
}
item {
name: "129263"
id: 2200
display_name: "Syrbula admirabilis"
}
item {
name: "28371"
id: 2201
display_name: "Thamnophis sirtalis fitchi"
}
item {
name: "243962"
id: 2202
display_name: "Charina bottae"
}
item {
name: "145659"
id: 2203
display_name: "Acronicta americana"
}
item {
name: "14588"
id: 2204
display_name: "Pycnonotus barbatus"
}
item {
name: "480298"
id: 2205
display_name: "Cornu aspersum"
}
item {
name: "51584"
id: 2206
display_name: "Melanitis leda"
}
item {
name: "243970"
id: 2207
display_name: "Larus glaucescens \303\227 occidentalis"
}
item {
name: "55556"
id: 2208
display_name: "Oncopeltus fasciatus"
}
item {
name: "506117"
id: 2209
display_name: "Aphelocoma woodhouseii"
}
item {
name: "63750"
id: 2210
display_name: "Anavitrinella pampinaria"
}
item {
name: "30983"
id: 2211
display_name: "Sistrurus miliarius"
}
item {
name: "211210"
id: 2212
display_name: "Holocnemus pluchei"
}
item {
name: "49587"
id: 2213
display_name: "Micropterus salmoides"
}
item {
name: "6417"
id: 2214
display_name: "Florisuga mellivora"
}
item {
name: "47381"
id: 2215
display_name: "Latrodectus mactans"
}
item {
name: "47382"
id: 2216
display_name: "Latrodectus hesperus"
}
item {
name: "4851"
id: 2217
display_name: "Haematopus finschi"
}
item {
name: "51588"
id: 2218
display_name: "Papilio polytes"
}
item {
name: "144431"
id: 2219
display_name: "Falcipennis canadensis"
}
item {
name: "118490"
id: 2220
display_name: "Haematopis grataria"
}
item {
name: "6433"
id: 2221
display_name: "Archilochus alexandri"
}
item {
name: "52956"
id: 2222
display_name: "Chaetodon capistratus"
}
item {
name: "203050"
id: 2223
display_name: "Junonia genoveva"
}
item {
name: "5170"
id: 2224
display_name: "Circus cyaneus"
}
item {
name: "84332"
id: 2225
display_name: "Panorpa nuptialis"
}
item {
name: "47414"
id: 2226
display_name: "Emerita analoga"
}
item {
name: "129335"
id: 2227
display_name: "Gibbifer californicus"
}
item {
name: "55610"
id: 2228
display_name: "Pyrrhocoris apterus"
}
item {
name: "58421"
id: 2229
display_name: "Phidippus johnsoni"
}
item {
name: "208608"
id: 2230
display_name: "Trachymela sloanei"
}
item {
name: "68138"
id: 2231
display_name: "Sympetrum corruptum"
}
item {
name: "129350"
id: 2232
display_name: "Photinus pyralis"
}
item {
name: "55625"
id: 2233
display_name: "Sympetrum striolatum"
}
item {
name: "55626"
id: 2234
display_name: "Pieris rapae"
}
item {
name: "203084"
id: 2235
display_name: "Ardea alba modesta"
}
item {
name: "129362"
id: 2236
display_name: "Zerene cesonia"
}
item {
name: "55638"
id: 2237
display_name: "Anania hortulata"
}
item {
name: "148537"
id: 2238
display_name: "Astraptes fulgerator"
}
item {
name: "55640"
id: 2239
display_name: "Celastrina argiolus"
}
item {
name: "55641"
id: 2240
display_name: "Polyommatus icarus"
}
item {
name: "16028"
id: 2241
display_name: "Myiarchus crinitus"
}
item {
name: "55643"
id: 2242
display_name: "Araschnia levana"
}
item {
name: "121180"
id: 2243
display_name: "Megastraea undosa"
}
item {
name: "47454"
id: 2244
display_name: "Triopha catalinae"
}
item {
name: "28389"
id: 2245
display_name: "Thamnophis ordinoides"
}
item {
name: "68139"
id: 2246
display_name: "Sympetrum vicinum"
}
item {
name: "55651"
id: 2247
display_name: "Autographa gamma"
}
item {
name: "55653"
id: 2248
display_name: "Maniola jurtina"
}
item {
name: "84369"
id: 2249
display_name: "Libellula forensis"
}
item {
name: "47135"
id: 2250
display_name: "Badumna longinqua"
}
item {
name: "48213"
id: 2251
display_name: "Ariolimax californicus"
}
item {
name: "121196"
id: 2252
display_name: "Acanthurus coeruleus"
}
item {
name: "47469"
id: 2253
display_name: "Doris montereyensis"
}
item {
name: "5181"
id: 2254
display_name: "Buteo regalis"
}
item {
name: "47472"
id: 2255
display_name: "Acanthodoris lutea"
}
item {
name: "129415"
id: 2256
display_name: "Copaeodes aurantiaca"
}
item {
name: "47505"
id: 2257
display_name: "Geitodoris heathi"
}
item {
name: "28398"
id: 2258
display_name: "Thamnophis elegans"
}
item {
name: "6553"
id: 2259
display_name: "Aeronautes saxatalis"
}
item {
name: "47516"
id: 2260
display_name: "Oncorhynchus mykiss"
}
item {
name: "6557"
id: 2261
display_name: "Chaetura vauxi"
}
item {
name: "47518"
id: 2262
display_name: "Salmo trutta"
}
item {
name: "55711"
id: 2263
display_name: "Ladona depressa"
}
item {
name: "55719"
id: 2264
display_name: "Eristalis tenax"
}
item {
name: "6571"
id: 2265
display_name: "Chaetura pelagica"
}
item {
name: "119881"
id: 2266
display_name: "Chrysochus cobaltinus"
}
item {
name: "145239"
id: 2267
display_name: "Setophaga pensylvanica"
}
item {
name: "154043"
id: 2268
display_name: "Bombus huntii"
}
item {
name: "41955"
id: 2269
display_name: "Acinonyx jubatus"
}
item {
name: "55746"
id: 2270
display_name: "Misumena vatia"
}
item {
name: "12024"
id: 2271
display_name: "Lanius ludovicianus"
}
item {
name: "5063"
id: 2272
display_name: "Anhinga anhinga"
}
item {
name: "59892"
id: 2273
display_name: "Prionus californicus"
}
item {
name: "52986"
id: 2274
display_name: "Largus californicus"
}
item {
name: "204454"
id: 2275
display_name: "Acridotheres tristis"
}
item {
name: "14816"
id: 2276
display_name: "Sitta pygmaea"
}
item {
name: "148560"
id: 2277
display_name: "Mestra amymone"
}
item {
name: "4585"
id: 2278
display_name: "Actophilornis africanus"
}
item {
name: "47590"
id: 2279
display_name: "Phloeodes diabolicus"
}
item {
name: "14823"
id: 2280
display_name: "Sitta canadensis"
}
item {
name: "14824"
id: 2281
display_name: "Sitta europaea"
}
item {
name: "14825"
id: 2282
display_name: "Sitta pusilla"
}
item {
name: "67598"
id: 2283
display_name: "Solenopsis invicta"
}
item {
name: "6638"
id: 2284
display_name: "Apus apus"
}
item {
name: "301557"
id: 2285
display_name: "Euphoria basalis"
}
item {
name: "132070"
id: 2286
display_name: "Phaneroptera nana"
}
item {
name: "14850"
id: 2287
display_name: "Sturnus vulgaris"
}
item {
name: "62550"
id: 2288
display_name: "Seiurus aurocapilla"
}
item {
name: "64006"
id: 2289
display_name: "Corbicula fluminea"
}
item {
name: "204545"
id: 2290
display_name: "Motacilla flava"
}
item {
name: "47632"
id: 2291
display_name: "Katharina tunicata"
}
item {
name: "325309"
id: 2292
display_name: "Chortophaga viridifasciata viridifasciata"
}
item {
name: "104993"
id: 2293
display_name: "Macrodiplax balteata"
}
item {
name: "17408"
id: 2294
display_name: "Vireo griseus"
}
item {
name: "14895"
id: 2295
display_name: "Toxostoma longirostre"
}
item {
name: "47664"
id: 2296
display_name: "Henricia leviuscula"
}
item {
name: "31281"
id: 2297
display_name: "Calotes versicolor"
}
item {
name: "119086"
id: 2298
display_name: "Agrius cingulata"
}
item {
name: "3849"
id: 2299
display_name: "Calidris alba"
}
item {
name: "14906"
id: 2300
display_name: "Toxostoma redivivum"
}
item {
name: "144479"
id: 2301
display_name: "Gallinula galeata"
}
item {
name: "3850"
id: 2302
display_name: "Calidris himantopus"
}
item {
name: "117520"
id: 2303
display_name: "Enhydra lutris nereis"
}
item {
name: "51491"
id: 2304
display_name: "Myliobatis californica"
}
item {
name: "121612"
id: 2305
display_name: "Estigmene acrea"
}
item {
name: "105034"
id: 2306
display_name: "Macromia illinoiensis"
}
item {
name: "6498"
id: 2307
display_name: "Eugenes fulgens"
}
item {
name: "46179"
id: 2308
display_name: "Cynomys ludovicianus"
}
item {
name: "105049"
id: 2309
display_name: "Macromia taeniolata"
}
item {
name: "94045"
id: 2310
display_name: "Anax longipes"
}
item {
name: "143119"
id: 2311
display_name: "Galgula partita"
}
item {
name: "9317"
id: 2312
display_name: "Icterus wagleri"
}
item {
name: "122704"
id: 2313
display_name: "Nucella ostrina"
}
item {
name: "146709"
id: 2314
display_name: "Grylloprociphilus imbricator"
}
item {
name: "9318"
id: 2315
display_name: "Icterus parisorum"
}
item {
name: "85333"
id: 2316
display_name: "Micrathena gracilis"
}
item {
name: "126737"
id: 2317
display_name: "Anania funebris"
}
item {
name: "49053"
id: 2318
display_name: "Cryptochiton stelleri"
}
item {
name: "47721"
id: 2319
display_name: "Parastichopus californicus"
}
item {
name: "34050"
id: 2320
display_name: "Phelsuma laticauda"
}
item {
name: "154219"
id: 2321
display_name: "Notarctia proxima"
}
item {
name: "51781"
id: 2322
display_name: "Tyria jacobaeae"
}
item {
name: "24230"
id: 2323
display_name: "Acris crepitans"
}
item {
name: "146032"
id: 2324
display_name: "Coluber flagellum"
}
item {
name: "146033"
id: 2325
display_name: "Coluber flagellum flagellum"
}
item {
name: "244340"
id: 2326
display_name: "Hordnia atropunctata"
}
item {
name: "146037"
id: 2327
display_name: "Coluber taeniatus"
}
item {
name: "244344"
id: 2328
display_name: "Scopula rubraria"
}
item {
name: "47737"
id: 2329
display_name: "Harpaphe haydeniana"
}
item {
name: "5227"
id: 2330
display_name: "Buteo platypterus"
}
item {
name: "39556"
id: 2331
display_name: "Apalone spinifera"
}
item {
name: "39560"
id: 2332
display_name: "Apalone spinifera emoryi"
}
item {
name: "318836"
id: 2333
display_name: "Gallinago gallinago"
}
item {
name: "105098"
id: 2334
display_name: "Magicicada septendecim"
}
item {
name: "96907"
id: 2335
display_name: "Celithemis fasciata"
}
item {
name: "9325"
id: 2336
display_name: "Icterus spurius"
}
item {
name: "3864"
id: 2337
display_name: "Calidris minutilla"
}
item {
name: "14995"
id: 2338
display_name: "Dumetella carolinensis"
}
item {
name: "424597"
id: 2339
display_name: "Porphyrio hochstetteri"
}
item {
name: "47768"
id: 2340
display_name: "Doriopsilla albopunctata"
}
item {
name: "498116"
id: 2341
display_name: "Aeolidia papillosa"
}
item {
name: "244378"
id: 2342
display_name: "Mallophora fautrix"
}
item {
name: "3866"
id: 2343
display_name: "Calidris fuscicollis"
}
item {
name: "47776"
id: 2344
display_name: "Ariolimax columbianus"
}
item {
name: "144497"
id: 2345
display_name: "Phalaropus tricolor"
}
item {
name: "39824"
id: 2346
display_name: "Pseudemys nelsoni"
}
item {
name: "236979"
id: 2347
display_name: "Colaptes auratus auratus"
}
item {
name: "55990"
id: 2348
display_name: "Podarcis muralis"
}
item {
name: "244407"
id: 2349
display_name: "Zelus renardii"
}
item {
name: "47802"
id: 2350
display_name: "Lymantria dispar"
}
item {
name: "15035"
id: 2351
display_name: "Melanotis caerulescens"
}
item {
name: "51658"
id: 2352
display_name: "Anthopleura artemisia"
}
item {
name: "121534"
id: 2353
display_name: "Oreta rosea"
}
item {
name: "73504"
id: 2354
display_name: "Tiaris olivaceus"
}
item {
name: "15045"
id: 2355
display_name: "Oreoscoptes montanus"
}
item {
name: "3873"
id: 2356
display_name: "Limnodromus scolopaceus"
}
item {
name: "47673"
id: 2357
display_name: "Pycnopodia helianthoides"
}
item {
name: "47817"
id: 2358
display_name: "Libellula saturata"
}
item {
name: "56644"
id: 2359
display_name: "Polygonia satyrus"
}
item {
name: "47826"
id: 2360
display_name: "Cancer productus"
}
item {
name: "3875"
id: 2361
display_name: "Tringa solitaria"
}
item {
name: "39782"
id: 2362
display_name: "Trachemys scripta"
}
item {
name: "143140"
id: 2363
display_name: "Cyllopsis gemma"
}
item {
name: "29818"
id: 2364
display_name: "Lampropeltis holbrooki"
}
item {
name: "56293"
id: 2365
display_name: "Macroglossum stellatarum"
}
item {
name: "154340"
id: 2366
display_name: "Gryllodes sigillatus"
}
item {
name: "14801"
id: 2367
display_name: "Sitta carolinensis"
}
item {
name: "121578"
id: 2368
display_name: "Ovis aries"
}
item {
name: "3879"
id: 2369
display_name: "Tringa totanus"
}
item {
name: "6893"
id: 2370
display_name: "Dendrocygna autumnalis"
}
item {
name: "154353"
id: 2371
display_name: "Sunira bicolorago"
}
item {
name: "6898"
id: 2372
display_name: "Dendrocygna viduata"
}
item {
name: "6899"
id: 2373
display_name: "Dendrocygna bicolor"
}
item {
name: "9342"
id: 2374
display_name: "Icterus abeillei"
}
item {
name: "39670"
id: 2375
display_name: "Lepidochelys olivacea"
}
item {
name: "4867"
id: 2376
display_name: "Vanellus chilensis"
}
item {
name: "39677"
id: 2377
display_name: "Dermochelys coriacea"
}
item {
name: "113407"
id: 2378
display_name: "Stylurus plagiatus"
}
item {
name: "39682"
id: 2379
display_name: "Chelydra serpentina"
}
item {
name: "6915"
id: 2380
display_name: "Cygnus buccinator"
}
item {
name: "6916"
id: 2381
display_name: "Cygnus cygnus"
}
item {
name: "6917"
id: 2382
display_name: "Cygnus columbianus"
}
item {
name: "29825"
id: 2383
display_name: "Lampropeltis calligaster calligaster"
}
item {
name: "6921"
id: 2384
display_name: "Cygnus olor"
}
item {
name: "146186"
id: 2385
display_name: "Intellagama lesueurii"
}
item {
name: "9346"
id: 2386
display_name: "Icterus galbula"
}
item {
name: "126765"
id: 2387
display_name: "Plutella xylostella"
}
item {
name: "71154"
id: 2388
display_name: "Aphis nerii"
}
item {
name: "6930"
id: 2389
display_name: "Anas platyrhynchos"
}
item {
name: "6933"
id: 2390
display_name: "Anas acuta"
}
item {
name: "39703"
id: 2391
display_name: "Sternotherus odoratus"
}
item {
name: "6937"
id: 2392
display_name: "Anas crecca"
}
item {
name: "64287"
id: 2393
display_name: "Lottia digitalis"
}
item {
name: "6944"
id: 2394
display_name: "Anas cyanoptera"
}
item {
name: "39713"
id: 2395
display_name: "Kinosternon subrubrum"
}
item {
name: "26691"
id: 2396
display_name: "Scaphiopus couchii"
}
item {
name: "6948"
id: 2397
display_name: "Anas fulvigula"
}
item {
name: "6953"
id: 2398
display_name: "Anas discors"
}
item {
name: "47914"
id: 2399
display_name: "Eumorpha pandorus"
}
item {
name: "47916"
id: 2400
display_name: "Actias luna"
}
item {
name: "6957"
id: 2401
display_name: "Anas strepera"
}
item {
name: "47919"
id: 2402
display_name: "Antheraea polyphemus"
}
item {
name: "119953"
id: 2403
display_name: "Hypoprepia fucosa"
}
item {
name: "6961"
id: 2404
display_name: "Anas clypeata"
}
item {
name: "134119"
id: 2405
display_name: "Anisomorpha buprestoides"
}
item {
name: "51678"
id: 2406
display_name: "Coenagrion puella"
}
item {
name: "72502"
id: 2407
display_name: "Anas chlorotis"
}
item {
name: "49060"
id: 2408
display_name: "Epiactis prolifera"
}
item {
name: "42122"
id: 2409
display_name: "Phacochoerus africanus"
}
item {
name: "58507"
id: 2410
display_name: "Poanes hobomok"
}
item {
name: "121669"
id: 2411
display_name: "Stenopus hispidus"
}
item {
name: "8143"
id: 2412
display_name: "Rhipidura leucophrys"
}
item {
name: "6985"
id: 2413
display_name: "Anas americana"
}
item {
name: "6993"
id: 2414
display_name: "Bucephala albeola"
}
item {
name: "121682"
id: 2415
display_name: "Tetraclita rubescens"
}
item {
name: "6996"
id: 2416
display_name: "Mergus serrator"
}
item {
name: "113498"
id: 2417
display_name: "Sympetrum ambiguum"
}
item {
name: "39771"
id: 2418
display_name: "Chrysemys picta"
}
item {
name: "7004"
id: 2419
display_name: "Mergus merganser"
}
item {
name: "39773"
id: 2420
display_name: "Chrysemys picta bellii"
}
item {
name: "113503"
id: 2421
display_name: "Sympetrum danae"
}
item {
name: "113507"
id: 2422
display_name: "Sympetrum fonscolombii"
}
item {
name: "154469"
id: 2423
display_name: "Isa textula"
}
item {
name: "47975"
id: 2424
display_name: "Argia apicalis"
}
item {
name: "7018"
id: 2425
display_name: "Anser anser"
}
item {
name: "7019"
id: 2426
display_name: "Anser albifrons"
}
item {
name: "47980"
id: 2427
display_name: "Speyeria cybele"
}
item {
name: "58514"
id: 2428
display_name: "Euphyes vestris"
}
item {
name: "113519"
id: 2429
display_name: "Sympetrum obtrusum"
}
item {
name: "7024"
id: 2430
display_name: "Somateria mollissima"
}
item {
name: "39793"
id: 2431
display_name: "Trachemys scripta scripta"
}
item {
name: "367475"
id: 2432
display_name: "Rallus obsoletus"
}
item {
name: "121716"
id: 2433
display_name: "Uresiphita reversalis"
}
item {
name: "113525"
id: 2434
display_name: "Sympetrum sanguineum"
}
item {
name: "113526"
id: 2435
display_name: "Sympetrum semicinctum"
}
item {
name: "18921"
id: 2436
display_name: "Platycercus elegans"
}
item {
name: "7032"
id: 2437
display_name: "Melanitta fusca"
}
item {
name: "5268"
id: 2438
display_name: "Milvus migrans"
}
item {
name: "144536"
id: 2439
display_name: "Gelochelidon nilotica"
}
item {
name: "413503"
id: 2440
display_name: "Ninox novaeseelandiae novaeseelandiae"
}
item {
name: "7036"
id: 2441
display_name: "Melanitta perspicillata"
}
item {
name: "64382"
id: 2442
display_name: "Lissotriton vulgaris"
}
item {
name: "39807"
id: 2443
display_name: "Terrapene ornata"
}
item {
name: "39808"
id: 2444
display_name: "Terrapene ornata luteola"
}
item {
name: "7044"
id: 2445
display_name: "Aythya collaris"
}
item {
name: "7045"
id: 2446
display_name: "Aythya ferina"
}
item {
name: "7046"
id: 2447
display_name: "Aythya fuligula"
}
item {
name: "146314"
id: 2448
display_name: "Opheodrys vernalis"
}
item {
name: "3906"
id: 2449
display_name: "Numenius americanus"
}
item {
name: "39823"
id: 2450
display_name: "Pseudemys gorzugi"
}
item {
name: "178991"
id: 2451
display_name: "Sypharochiton pelliserpentis"
}
item {
name: "7061"
id: 2452
display_name: "Chen caerulescens"
}
item {
name: "39830"
id: 2453
display_name: "Pseudemys concinna"
}
item {
name: "127490"
id: 2454
display_name: "Parrhasius m-album"
}
item {
name: "15256"
id: 2455
display_name: "Chamaea fasciata"
}
item {
name: "39836"
id: 2456
display_name: "Malaclemys terrapin"
}
item {
name: "133764"
id: 2457
display_name: "Trichopoda pennipes"
}
item {
name: "334753"
id: 2458
display_name: "Hypselonotus punctiventris"
}
item {
name: "58611"
id: 2459
display_name: "Amia calva"
}
item {
name: "56240"
id: 2460
display_name: "Argia vivida"
}
item {
name: "7089"
id: 2461
display_name: "Branta canadensis"
}
item {
name: "146354"
id: 2462
display_name: "Phrynosoma blainvillii"
}
item {
name: "56243"
id: 2463
display_name: "Plebejus acmon"
}
item {
name: "144542"
id: 2464
display_name: "Thalasseus elegans"
}
item {
name: "121783"
id: 2465
display_name: "Lithobates clamitans melanota"
}
item {
name: "39865"
id: 2466
display_name: "Glyptemys insculpta"
}
item {
name: "39867"
id: 2467
display_name: "Emys orbicularis"
}
item {
name: "7104"
id: 2468
display_name: "Branta sandvicensis"
}
item {
name: "50336"
id: 2469
display_name: "Siproeta stelenes"
}
item {
name: "7056"
id: 2470
display_name: "Aythya americana"
}
item {
name: "7107"
id: 2471
display_name: "Aix sponsa"
}
item {
name: "7109"
id: 2472
display_name: "Lophodytes cucullatus"
}
item {
name: "7111"
id: 2473
display_name: "Histrionicus histrionicus"
}
item {
name: "367562"
id: 2474
display_name: "Aratinga nenday"
}
item {
name: "39885"
id: 2475
display_name: "Emydoidea blandingii"
}
item {
name: "367566"
id: 2476
display_name: "Psittacara holochlorus"
}
item {
name: "143181"
id: 2477
display_name: "Marimatha nigrofimbria"
}
item {
name: "7120"
id: 2478
display_name: "Cairina moschata"
}
item {
name: "7122"
id: 2479
display_name: "Netta rufina"
}
item {
name: "130003"
id: 2480
display_name: "Phaeoura quernaria"
}
item {
name: "367572"
id: 2481
display_name: "Psittacara erythrogenys"
}
item {
name: "17009"
id: 2482
display_name: "Sayornis saya"
}
item {
name: "154582"
id: 2483
display_name: "Ennomos magnaria"
}
item {
name: "58532"
id: 2484
display_name: "Colias eurytheme"
}
item {
name: "121821"
id: 2485
display_name: "Sceliphron caementarium"
}
item {
name: "48094"
id: 2486
display_name: "Dryocampa rubicunda"
}
item {
name: "7057"
id: 2487
display_name: "Aythya valisineria"
}
item {
name: "17646"
id: 2488
display_name: "Picoides albolarvatus"
}
item {
name: "201551"
id: 2489
display_name: "Procyon lotor lotor"
}
item {
name: "58534"
id: 2490
display_name: "Lycaena hyllus"
}
item {
name: "73553"
id: 2491
display_name: "Vermivora cyanoptera"
}
item {
name: "359401"
id: 2492
display_name: "Exomala orientalis"
}
item {
name: "8018"
id: 2493
display_name: "Corvus caurinus"
}
item {
name: "490478"
id: 2494
display_name: "Tegula brunnea"
}
item {
name: "20307"
id: 2495
display_name: "Asio otus"
}
item {
name: "227466"
id: 2496
display_name: "Peridea ferruginea"
}
item {
name: "122172"
id: 2497
display_name: "Pyrisitia lisa"
}
item {
name: "133631"
id: 2498
display_name: "Polites peckius"
}
item {
name: "8021"
id: 2499
display_name: "Corvus brachyrhynchos"
}
item {
name: "7170"
id: 2500
display_name: "Clangula hyemalis"
}
item {
name: "58539"
id: 2501
display_name: "Satyrium calanus"
}
item {
name: "27137"
id: 2502
display_name: "Coluber constrictor"
}
item {
name: "7176"
id: 2503
display_name: "Chenonetta jubata"
}
item {
name: "42157"
id: 2504
display_name: "Giraffa camelopardalis"
}
item {
name: "144541"
id: 2505
display_name: "Thalasseus sandvicensis"
}
item {
name: "23572"
id: 2506
display_name: "Litoria aurea"
}
item {
name: "354820"
id: 2507
display_name: "Patiriella regularis"
}
item {
name: "55887"
id: 2508
display_name: "Andricus quercuscalifornicus"
}
item {
name: "46255"
id: 2509
display_name: "Ammospermophilus leucurus"
}
item {
name: "334341"
id: 2510
display_name: "Oryctolagus cuniculus domesticus"
}
item {
name: "144560"
id: 2511
display_name: "Eolophus roseicapilla"
}
item {
name: "94043"
id: 2512
display_name: "Anax imperator"
}
item {
name: "425004"
id: 2513
display_name: "Dryas iulia moderata"
}
item {
name: "269359"
id: 2514
display_name: "Cactophagus spinolae"
}
item {
name: "72755"
id: 2515
display_name: "Colaptes rubiginosus"
}
item {
name: "319123"
id: 2516
display_name: "Meleagris gallopavo silvestris"
}
item {
name: "130846"
id: 2517
display_name: "Lyssa zampa"
}
item {
name: "203831"
id: 2518
display_name: "Nemoria bistriaria"
}
item {
name: "367678"
id: 2519
display_name: "Ptiliogonys cinereus"
}
item {
name: "5301"
id: 2520
display_name: "Elanoides forficatus"
}
item {
name: "9398"
id: 2521
display_name: "Carduelis carduelis"
}
item {
name: "143201"
id: 2522
display_name: "Coryphista meadii"
}
item {
name: "104419"
id: 2523
display_name: "Lestes australis"
}
item {
name: "367693"
id: 2524
display_name: "Cassiculus melanicterus"
}
item {
name: "143452"
id: 2525
display_name: "Deidamia inscriptum"
}
item {
name: "466003"
id: 2526
display_name: "Romalea microptera"
}
item {
name: "84494"
id: 2527
display_name: "Paraphidippus aurantius"
}
item {
name: "203866"
id: 2528
display_name: "Rabdophaga strobiloides"
}
item {
name: "72797"
id: 2529
display_name: "Dendragapus fuliginosus"
}
item {
name: "7266"
id: 2530
display_name: "Psaltriparus minimus"
}
item {
name: "120920"
id: 2531
display_name: "Odocoileus virginianus clavium"
}
item {
name: "7278"
id: 2532
display_name: "Aegithalos caudatus"
}
item {
name: "30681"
id: 2533
display_name: "Agkistrodon contortrix mokasen"
}
item {
name: "413547"
id: 2534
display_name: "Zosterops lateralis lateralis"
}
item {
name: "48262"
id: 2535
display_name: "Apatelodes torrefacta"
}
item {
name: "121993"
id: 2536
display_name: "Lampides boeticus"
}
item {
name: "48267"
id: 2537
display_name: "Crotalus oreganus oreganus"
}
item {
name: "48268"
id: 2538
display_name: "Crotalus oreganus"
}
item {
name: "147309"
id: 2539
display_name: "Feltia herilis"
}
item {
name: "146413"
id: 2540
display_name: "Sceloporus consobrinus"
}
item {
name: "326764"
id: 2541
display_name: "Cyprinus carpio haematopterus"
}
item {
name: "5315"
id: 2542
display_name: "Haliaeetus leucogaster"
}
item {
name: "4519"
id: 2543
display_name: "Uria aalge"
}
item {
name: "40085"
id: 2544
display_name: "Gopherus polyphemus"
}
item {
name: "23702"
id: 2545
display_name: "Agalychnis callidryas"
}
item {
name: "210116"
id: 2546
display_name: "Tringa semipalmata inornatus"
}
item {
name: "40092"
id: 2547
display_name: "Stigmochelys pardalis"
}
item {
name: "59931"
id: 2548
display_name: "Acanthurus triostegus"
}
item {
name: "48292"
id: 2549
display_name: "Philoscia muscorum"
}
item {
name: "146601"
id: 2550
display_name: "Scolopendra heros"
}
item {
name: "244906"
id: 2551
display_name: "Panchlora nivea"
}
item {
name: "48302"
id: 2552
display_name: "Limulus polyphemus"
}
item {
name: "180008"
id: 2553
display_name: "Otospermophilus variegatus"
}
item {
name: "7347"
id: 2554
display_name: "Alauda arvensis"
}
item {
name: "43459"
id: 2555
display_name: "Macaca fascicularis"
}
item {
name: "113846"
id: 2556
display_name: "Telebasis salva"
}
item {
name: "7356"
id: 2557
display_name: "Galerida cristata"
}
item {
name: "64705"
id: 2558
display_name: "Delichon urbicum"
}
item {
name: "145932"
id: 2559
display_name: "Aspidoscelis hyperythra beldingi"
}
item {
name: "72912"
id: 2560
display_name: "Helmitheros vermivorum"
}
item {
name: "69805"
id: 2561
display_name: "Octogomphus specularis"
}
item {
name: "129572"
id: 2562
display_name: "Aphomia sociella"
}
item {
name: "31964"
id: 2563
display_name: "Barisia imbricata"
}
item {
name: "244625"
id: 2564
display_name: "Halmus chalybeus"
}
item {
name: "58576"
id: 2565
display_name: "Phyciodes cocyta"
}
item {
name: "72931"
id: 2566
display_name: "Hylocharis leucotis"
}
item {
name: "104449"
id: 2567
display_name: "Lestes rectangularis"
}
item {
name: "14886"
id: 2568
display_name: "Mimus polyglottos"
}
item {
name: "23783"
id: 2569
display_name: "Hyla versicolor"
}
item {
name: "23784"
id: 2570
display_name: "Hyla plicata"
}
item {
name: "8575"
id: 2571
display_name: "Gymnorhina tibicen"
}
item {
name: "2599"
id: 2572
display_name: "Alcedo atthis"
}
item {
name: "61152"
id: 2573
display_name: "Pyrrhosoma nymphula"
}
item {
name: "58579"
id: 2574
display_name: "Polygonia interrogationis"
}
item {
name: "31993"
id: 2575
display_name: "Ophisaurus attenuatus attenuatus"
}
item {
name: "53985"
id: 2576
display_name: "Odocoileus hemionus californicus"
}
item {
name: "144549"
id: 2577
display_name: "Streptopelia chinensis"
}
item {
name: "105730"
id: 2578
display_name: "Micrathyria hagenii"
}
item {
name: "7428"
id: 2579
display_name: "Bombycilla cedrorum"
}
item {
name: "7429"
id: 2580
display_name: "Bombycilla garrulus"
}
item {
name: "50391"
id: 2581
display_name: "Polygonia gracilis"
}
item {
name: "7067"
id: 2582
display_name: "Tadorna tadorna"
}
item {
name: "413513"
id: 2583
display_name: "Petroica australis australis"
}
item {
name: "39469"
id: 2584
display_name: "Varanus varius"
}
item {
name: "58479"
id: 2585
display_name: "Pholisora catullus"
}
item {
name: "127929"
id: 2586
display_name: "Achalarus lyciades"
}
item {
name: "48403"
id: 2587
display_name: "Gasterosteus aculeatus"
}
item {
name: "18990"
id: 2588
display_name: "Amazona autumnalis"
}
item {
name: "1241"
id: 2589
display_name: "Dendragapus obscurus"
}
item {
name: "228634"
id: 2590
display_name: "Ponometia erastrioides"
}
item {
name: "64806"
id: 2591
display_name: "Pelophylax"
}
item {
name: "51761"
id: 2592
display_name: "Hetaerina americana"
}
item {
name: "7464"
id: 2593
display_name: "Catherpes mexicanus"
}
item {
name: "318761"
id: 2594
display_name: "Sceloporus uniformis"
}
item {
name: "7068"
id: 2595
display_name: "Tadorna ferruginea"
}
item {
name: "204077"
id: 2596
display_name: "Achyra rantalis"
}
item {
name: "7470"
id: 2597
display_name: "Campylorhynchus brunneicapillus"
}
item {
name: "32048"
id: 2598
display_name: "Gerrhonotus infernalis"
}
item {
name: "204081"
id: 2599
display_name: "Pyrausta laticlavia"
}
item {
name: "7476"
id: 2600
display_name: "Campylorhynchus rufinucha"
}
item {
name: "32055"
id: 2601
display_name: "Elgaria multicarinata"
}
item {
name: "244276"
id: 2602
display_name: "Rhipidura fuliginosa"
}
item {
name: "144187"
id: 2603
display_name: "Pyrisitia proterpia"
}
item {
name: "32059"
id: 2604
display_name: "Elgaria multicarinata multicarinata"
}
item {
name: "32061"
id: 2605
display_name: "Elgaria kingii"
}
item {
name: "146750"
id: 2606
display_name: "Lascoria ambigualis"
}
item {
name: "32064"
id: 2607
display_name: "Elgaria coerulea"
}
item {
name: "23873"
id: 2608
display_name: "Hyla squirella"
}
item {
name: "48450"
id: 2609
display_name: "Peltodoris nobilis"
}
item {
name: "64146"
id: 2610
display_name: "Fissurella volcano"
}
item {
name: "48259"
id: 2611
display_name: "Pelidnota punctata"
}
item {
name: "122185"
id: 2612
display_name: "Pantherophis alleghaniensis quadrivittata"
}
item {
name: "7498"
id: 2613
display_name: "Polioptila melanura"
}
item {
name: "56652"
id: 2614
display_name: "Haliotis rufescens"
}
item {
name: "122191"
id: 2615
display_name: "Pelecanus occidentalis carolinensis"
}
item {
name: "73041"
id: 2616
display_name: "Melozone aberti"
}
item {
name: "199381"
id: 2617
display_name: "Homalodisca vitripennis"
}
item {
name: "73044"
id: 2618
display_name: "Melozone crissalis"
}
item {
name: "83290"
id: 2619
display_name: "Zanclus cornutus"
}
item {
name: "7513"
id: 2620
display_name: "Thryothorus ludovicianus"
}
item {
name: "28559"
id: 2621
display_name: "Storeria occipitomaculata occipitomaculata"
}
item {
name: "24255"
id: 2622
display_name: "Pseudacris maculata"
}
item {
name: "130398"
id: 2623
display_name: "Melanargia galathea"
}
item {
name: "29925"
id: 2624
display_name: "Heterodon platirhinos"
}
item {
name: "48484"
id: 2625
display_name: "Harmonia axyridis"
}
item {
name: "122214"
id: 2626
display_name: "Odontotaenius disjunctus"
}
item {
name: "39484"
id: 2627
display_name: "Xantusia vigilis"
}
item {
name: "73919"
id: 2628
display_name: "Podarcis sicula"
}
item {
name: "154553"
id: 2629
display_name: "Leptoglossus clypealis"
}
item {
name: "23922"
id: 2630
display_name: "Hyla intermedia"
}
item {
name: "122228"
id: 2631
display_name: "Acharia stimulea"
}
item {
name: "108344"
id: 2632
display_name: "Pantala flavescens"
}
item {
name: "118538"
id: 2633
display_name: "Cotinis nitida"
}
item {
name: "23930"
id: 2634
display_name: "Hyla chrysoscelis"
}
item {
name: "23933"
id: 2635
display_name: "Hyla arenicolor"
}
item {
name: "122238"
id: 2636
display_name: "Porcellio scaber"
}
item {
name: "479803"
id: 2637
display_name: "Dioprosopa clavata"
}
item {
name: "5355"
id: 2638
display_name: "Parabuteo unicinctus"
}
item {
name: "146822"
id: 2639
display_name: "Texola elada"
}
item {
name: "236935"
id: 2640
display_name: "Anas platyrhynchos domesticus"
}
item {
name: "7562"
id: 2641
display_name: "Troglodytes aedon"
}
item {
name: "339444"
id: 2642
display_name: "Buteo lineatus elegans"
}
item {
name: "42221"
id: 2643
display_name: "Odocoileus hemionus columbianus"
}
item {
name: "15764"
id: 2644
display_name: "Thamnophilus doliatus"
}
item {
name: "122261"
id: 2645
display_name: "Cucullia convexipennis"
}
item {
name: "122262"
id: 2646
display_name: "Brachystola magna"
}
item {
name: "7576"
id: 2647
display_name: "Thryomanes bewickii"
}
item {
name: "143015"
id: 2648
display_name: "Eubaphe mendica"
}
item {
name: "73592"
id: 2649
display_name: "Actinemys marmorata"
}
item {
name: "84549"
id: 2650
display_name: "Plathemis lydia"
}
item {
name: "23969"
id: 2651
display_name: "Hyla cinerea"
}
item {
name: "318882"
id: 2652
display_name: "Ancistrocerus gazella"
}
item {
name: "7072"
id: 2653
display_name: "Tadorna variegata"
}
item {
name: "48548"
id: 2654
display_name: "Vanessa cardui"
}
item {
name: "48549"
id: 2655
display_name: "Vanessa virginiensis"
}
item {
name: "122278"
id: 2656
display_name: "Pomacea canaliculata"
}
item {
name: "9457"
id: 2657
display_name: "Myioborus miniatus"
}
item {
name: "122280"
id: 2658
display_name: "Pyrgus albescens"
}
item {
name: "122281"
id: 2659
display_name: "Calycopis cecrops"
}
item {
name: "130474"
id: 2660
display_name: "Achlyodes pallida"
}
item {
name: "338503"
id: 2661
display_name: "Phalacrocorax varius varius"
}
item {
name: "9458"
id: 2662
display_name: "Myioborus pictus"
}
item {
name: "73629"
id: 2663
display_name: "Anolis nebulosus"
}
item {
name: "122291"
id: 2664
display_name: "Larus argentatus smithsonianus"
}
item {
name: "56756"
id: 2665
display_name: "Murgantia histrionica"
}
item {
name: "73148"
id: 2666
display_name: "Parkesia motacilla"
}
item {
name: "48575"
id: 2667
display_name: "Okenia rosacea"
}
item {
name: "56768"
id: 2668
display_name: "Sula granti"
}
item {
name: "48578"
id: 2669
display_name: "Anteos maerula"
}
item {
name: "64968"
id: 2670
display_name: "Anaxyrus americanus"
}
item {
name: "64970"
id: 2671
display_name: "Anaxyrus boreas"
}
item {
name: "115549"
id: 2672
display_name: "Crotalus lepidus lepidus"
}
item {
name: "64977"
id: 2673
display_name: "Anaxyrus fowleri"
}
item {
name: "19022"
id: 2674
display_name: "Ara macao"
}
item {
name: "24259"
id: 2675
display_name: "Pseudacris regilla"
}
item {
name: "64984"
id: 2676
display_name: "Anaxyrus punctatus"
}
item {
name: "64985"
id: 2677
display_name: "Anaxyrus quercicus"
}
item {
name: "73178"
id: 2678
display_name: "Peucaea ruficauda"
}
item {
name: "64987"
id: 2679
display_name: "Anaxyrus speciosus"
}
item {
name: "64989"
id: 2680
display_name: "Anaxyrus woodhousii"
}
item {
name: "339596"
id: 2681
display_name: "Calidris subruficollis"
}
item {
name: "56552"
id: 2682
display_name: "Carabus nemoralis"
}
item {
name: "84722"
id: 2683
display_name: "Ischnura verticalis"
}
item {
name: "122356"
id: 2684
display_name: "Eumorpha achemon"
}
item {
name: "318965"
id: 2685
display_name: "Chrysolina bankii"
}
item {
name: "228855"
id: 2686
display_name: "Protodeltote muscosula"
}
item {
name: "146940"
id: 2687
display_name: "Agriphila vulgivagella"
}
item {
name: "56832"
id: 2688
display_name: "Nymphalis antiopa"
}
item {
name: "61355"
id: 2689
display_name: "Vespula pensylvanica"
}
item {
name: "48645"
id: 2690
display_name: "Megathura crenulata"
}
item {
name: "73222"
id: 2691
display_name: "Phoenicopterus roseus"
}
item {
name: "363354"
id: 2692
display_name: "Lobatus gigas"
}
item {
name: "3802"
id: 2693
display_name: "Morus bassanus"
}
item {
name: "62722"
id: 2694
display_name: "Apalone spinifera spinifera"
}
item {
name: "48655"
id: 2695
display_name: "Aplysia californica"
}
item {
name: "54468"
id: 2696
display_name: "Aglais urticae"
}
item {
name: "48662"
id: 2697
display_name: "Danaus plexippus"
}
item {
name: "49071"
id: 2698
display_name: "Metridium senile"
}
item {
name: "228899"
id: 2699
display_name: "Psamatodes abydata"
}
item {
name: "133102"
id: 2700
display_name: "Oncometopia orbona"
}
item {
name: "39659"
id: 2701
display_name: "Chelonia mydas"
}
item {
name: "121437"
id: 2702
display_name: "Dolomedes triton"
}
item {
name: "94545"
id: 2703
display_name: "Argia fumipennis"
}
item {
name: "56887"
id: 2704
display_name: "Bombus pensylvanicus"
}
item {
name: "40509"
id: 2705
display_name: "Eptesicus fuscus"
}
item {
name: "58635"
id: 2706
display_name: "Lepomis megalotis"
}
item {
name: "100369"
id: 2707
display_name: "Erpetogomphus designatus"
}
item {
name: "58636"
id: 2708
display_name: "Lepomis cyanellus"
}
item {
name: "40522"
id: 2709
display_name: "Lasiurus borealis"
}
item {
name: "102006"
id: 2710
display_name: "Hagenius brevistylus"
}
item {
name: "50283"
id: 2711
display_name: "Marpesia petreus"
}
item {
name: "123829"
id: 2712
display_name: "Pelecanus occidentalis californicus"
}
item {
name: "62453"
id: 2713
display_name: "Anthidium manicatum"
}
item {
name: "56925"
id: 2714
display_name: "Graphocephala coccinea"
}
item {
name: "48738"
id: 2715
display_name: "Sphex pensylvanicus"
}
item {
name: "43151"
id: 2716
display_name: "Oryctolagus cuniculus"
}
item {
name: "19822"
id: 2717
display_name: "Glaucidium brasilianum"
}
item {
name: "48750"
id: 2718
display_name: "Lottia scabra"
}
item {
name: "335071"
id: 2719
display_name: "Elophila obliteralis"
}
item {
name: "81521"
id: 2720
display_name: "Vipera berus"
}
item {
name: "43697"
id: 2721
display_name: "Elephas maximus"
}
item {
name: "7079"
id: 2722
display_name: "Oxyura jamaicensis"
}
item {
name: "43042"
id: 2723
display_name: "Erinaceus europaeus"
}
item {
name: "40086"
id: 2724
display_name: "Gopherus agassizii"
}
item {
name: "81545"
id: 2725
display_name: "Lumbricus terrestris"
}
item {
name: "16010"
id: 2726
display_name: "Myiarchus cinerascens"
}
item {
name: "2669"
id: 2727
display_name: "Chloroceryle americana"
}
item {
name: "9535"
id: 2728
display_name: "Sturnella neglecta"
}
item {
name: "81554"
id: 2729
display_name: "Ictalurus punctatus"
}
item {
name: "339907"
id: 2730
display_name: "Ramphastos ambiguus"
}
item {
name: "39814"
id: 2731
display_name: "Terrapene carolina"
}
item {
name: "10254"
id: 2732
display_name: "Paroaria coronata"
}
item {
name: "40614"
id: 2733
display_name: "Antrozous pallidus"
}
item {
name: "502385"
id: 2734
display_name: "Probole amicaria"
}
item {
name: "24233"
id: 2735
display_name: "Acris gryllus"
}
item {
name: "81579"
id: 2736
display_name: "Steatoda triangulosa"
}
item {
name: "81580"
id: 2737
display_name: "Callosamia promethea"
}
item {
name: "146034"
id: 2738
display_name: "Coluber lateralis"
}
item {
name: "81582"
id: 2739
display_name: "Hyalophora cecropia"
}
item {
name: "81583"
id: 2740
display_name: "Anisota senatoria"
}
item {
name: "66002"
id: 2741
display_name: "Lithobates palustris"
}
item {
name: "81586"
id: 2742
display_name: "Citheronia regalis"
}
item {
name: "40629"
id: 2743
display_name: "Lasionycteris noctivagans"
}
item {
name: "81590"
id: 2744
display_name: "Eacles imperialis"
}
item {
name: "204472"
id: 2745
display_name: "Buteo buteo"
}
item {
name: "65212"
id: 2746
display_name: "Craugastor augusti"
}
item {
name: "48830"
id: 2747
display_name: "Patiria miniata"
}
item {
name: "48833"
id: 2748
display_name: "Pisaster giganteus"
}
item {
name: "16071"
id: 2749
display_name: "Myiodynastes luteiventris"
}
item {
name: "81610"
id: 2750
display_name: "Balanus glandula"
}
item {
name: "24268"
id: 2751
display_name: "Pseudacris crucifer"
}
item {
name: "16079"
id: 2752
display_name: "Contopus sordidulus"
}
item {
name: "204496"
id: 2753
display_name: "Corvus corone"
}
item {
name: "204498"
id: 2754
display_name: "Cyanoramphus novaezelandiae"
}
item {
name: "24277"
id: 2755
display_name: "Smilisca baudinii"
}
item {
name: "22631"
id: 2756
display_name: "Eleutherodactylus planirostris"
}
item {
name: "16100"
id: 2757
display_name: "Contopus virens"
}
item {
name: "42278"
id: 2758
display_name: "Aepyceros melampus"
}
item {
name: "16106"
id: 2759
display_name: "Contopus pertinax"
}
item {
name: "16110"
id: 2760
display_name: "Contopus cooperi"
}
item {
name: "42280"
id: 2761
display_name: "Connochaetes taurinus"
}
item {
name: "47455"
id: 2762
display_name: "Octopus rubescens"
}
item {
name: "204533"
id: 2763
display_name: "Larus argentatus"
}
item {
name: "81656"
id: 2764
display_name: "Nematocampa resistaria"
}
item {
name: "81657"
id: 2765
display_name: "Lacinipolia renigera"
}
item {
name: "204519"
id: 2766
display_name: "Halcyon smyrnensis"
}
item {
name: "62762"
id: 2767
display_name: "Cordulegaster dorsalis"
}
item {
name: "81663"
id: 2768
display_name: "Malacosoma disstria"
}
item {
name: "32512"
id: 2769
display_name: "Rena dulcis"
}
item {
name: "81665"
id: 2770
display_name: "Orgyia leucostigma"
}
item {
name: "130821"
id: 2771
display_name: "Haploa confusa"
}
item {
name: "81672"
id: 2772
display_name: "Clemensia albata"
}
item {
name: "204554"
id: 2773
display_name: "Onychognathus morio"
}
item {
name: "81677"
id: 2774
display_name: "Euchaetes egle"
}
item {
name: "81680"
id: 2775
display_name: "Scopula limboundata"
}
item {
name: "318497"
id: 2776
display_name: "Hemipenthes sinuosa"
}
item {
name: "179987"
id: 2777
display_name: "Ictidomys parvidens"
}
item {
name: "179988"
id: 2778
display_name: "Ictidomys tridecemlineatus"
}
item {
name: "81685"
id: 2779
display_name: "Evergestis pallidata"
}
item {
name: "81687"
id: 2780
display_name: "Noctua pronuba"
}
item {
name: "179992"
id: 2781
display_name: "Xerospermophilus spilosoma"
}
item {
name: "179994"
id: 2782
display_name: "Urocitellus armatus"
}
item {
name: "9519"
id: 2783
display_name: "Cyanocompsa parellina"
}
item {
name: "179998"
id: 2784
display_name: "Urocitellus columbianus"
}
item {
name: "114463"
id: 2785
display_name: "Trithemis annulata"
}
item {
name: "199169"
id: 2786
display_name: "Catocala maestosa"
}
item {
name: "143323"
id: 2787
display_name: "Tolype velleda"
}
item {
name: "120113"
id: 2788
display_name: "Anthrenus verbasci"
}
item {
name: "7601"
id: 2789
display_name: "Cistothorus palustris"
}
item {
name: "81706"
id: 2790
display_name: "Alaus oculatus"
}
item {
name: "220974"
id: 2791
display_name: "Harrisimemna trisignata"
}
item {
name: "20445"
id: 2792
display_name: "Tyto alba"
}
item {
name: "73523"
id: 2793
display_name: "Trogon caligatus"
}
item {
name: "49590"
id: 2794
display_name: "Micropterus dolomieu"
}
item {
name: "41729"
id: 2795
display_name: "Mirounga leonina"
}
item {
name: "48957"
id: 2796
display_name: "Arilus cristatus"
}
item {
name: "81727"
id: 2797
display_name: "Abaeis nicippe"
}
item {
name: "8000"
id: 2798
display_name: "Corvus monedula"
}
item {
name: "8001"
id: 2799
display_name: "Corvus ossifragus"
}
item {
name: "171843"
id: 2800
display_name: "Rabdotus dealbatus"
}
item {
name: "81734"
id: 2801
display_name: "Neophasia menapia"
}
item {
name: "258813"
id: 2802
display_name: "Clogmia albipunctata"
}
item {
name: "332243"
id: 2803
display_name: "Lepturobosca chrysocoma"
}
item {
name: "81744"
id: 2804
display_name: "Heliconius erato"
}
item {
name: "218424"
id: 2805
display_name: "Dicymolomia julianalis"
}
item {
name: "3813"
id: 2806
display_name: "Spheniscus demersus"
}
item {
name: "81749"
id: 2807
display_name: "Malacosoma americanum"
}
item {
name: "81752"
id: 2808
display_name: "Pyrausta tyralis"
}
item {
name: "48987"
id: 2809
display_name: "Hippodamia convergens"
}
item {
name: "8029"
id: 2810
display_name: "Corvus frugilegus"
}
item {
name: "8031"
id: 2811
display_name: "Corvus splendens"
}
item {
name: "147298"
id: 2812
display_name: "Lasiommata megera"
}
item {
name: "7087"
id: 2813
display_name: "Branta bernicla"
}
item {
name: "48550"
id: 2814
display_name: "Phoebis sennae"
}
item {
name: "4349"
id: 2815
display_name: "Larus hyperboreus"
}
item {
name: "84027"
id: 2816
display_name: "Trigonopeltastes delta"
}
item {
name: "194762"
id: 2817
display_name: "Vanessa itea"
}
item {
name: "311163"
id: 2818
display_name: "Pseudomops septentrionalis"
}
item {
name: "55957"
id: 2819
display_name: "Scudderia furcata"
}
item {
name: "39822"
id: 2820
display_name: "Pseudemys texana"
}
item {
name: "204685"
id: 2821
display_name: "Chlosyne ehrenbergii"
}
item {
name: "122767"
id: 2822
display_name: "Columba livia domestica"
}
item {
name: "55960"
id: 2823
display_name: "Sceloporus graciosus"
}
item {
name: "121823"
id: 2824
display_name: "Autographa californica"
}
item {
name: "8088"
id: 2825
display_name: "Garrulus glandarius"
}
item {
name: "65433"
id: 2826
display_name: "Ecnomiohyla miotympanum"
}
item {
name: "49051"
id: 2827
display_name: "Anthopleura sola"
}
item {
name: "125815"
id: 2828
display_name: "Coenonympha arcania"
}
item {
name: "55963"
id: 2829
display_name: "Malacosoma californicum"
}
item {
name: "120479"
id: 2830
display_name: "Anser anser domesticus"
}
item {
name: "133788"
id: 2831
display_name: "Xylocopa micans"
}
item {
name: "81559"
id: 2832
display_name: "Epargyreus clarus"
}
item {
name: "81839"
id: 2833
display_name: "Platycryptus undatus"
}
item {
name: "133791"
id: 2834
display_name: "Polistes exclamans"
}
item {
name: "84640"
id: 2835
display_name: "Polistes dominula"
}
item {
name: "73666"
id: 2836
display_name: "Aspidoscelis exsanguis"
}
item {
name: "73669"
id: 2837
display_name: "Aspidoscelis gularis"
}
item {
name: "16326"
id: 2838
display_name: "Mitrephanes phaeocercus"
}
item {
name: "49095"
id: 2839
display_name: "Pagurus samuelis"
}
item {
name: "73672"
id: 2840
display_name: "Aspidoscelis hyperythra"
}
item {
name: "59192"
id: 2841
display_name: "Polites sabuleti"
}
item {
name: "81561"
id: 2842
display_name: "Anaea andria"
}
item {
name: "81881"
id: 2843
display_name: "Amphipsalta zelandica"
}
item {
name: "73690"
id: 2844
display_name: "Aspidoscelis sexlineata"
}
item {
name: "73694"
id: 2845
display_name: "Aspidoscelis velox"
}
item {
name: "335840"
id: 2846
display_name: "Pyrausta inornatalis"
}
item {
name: "49126"
id: 2847
display_name: "Strongylocentrotus franciscanus"
}
item {
name: "204775"
id: 2848
display_name: "Kricogonia lyside"
}
item {
name: "475115"
id: 2849
display_name: "Ardenna creatopus"
}
item {
name: "475120"
id: 2850
display_name: "Ardenna gravis"
}
item {
name: "62803"
id: 2851
display_name: "Monadenia fidelis"
}
item {
name: "49150"
id: 2852
display_name: "Agraulis vanillae"
}
item {
name: "83929"
id: 2853
display_name: "Phanaeus vindex"
}
item {
name: "199839"
id: 2854
display_name: "Haemorhous cassinii"
}
|
TensorFlow2/Detection/Efficientdet | Efficientdet | README | # EfficientDet-D0 For TensorFlow 2
This repository provides scripts and recipes to train and infer EfficientDet-D0 to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
## Table Of Contents
* [Model overview](#model-overview)
* [Model Architecture](#model-architecture)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick start guide](#quick-start-guide)
* [Advanced](#advanced)
* [Command-line arguments](#command-line-arguments)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 32GB)](#training-accuracy-nvidia-dgx-1-8x-v100-32gb)
* [Training accuracy: NVIDIA DGX-1 (32x V100 32GB)](#training-accuracy-nvidia-dgx-1-32x-v100-32gb)
* [Training loss curves](#training-loss-curves)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 32GB)](#inference-performance-nvidia-dgx-1-1x-v100-32gb)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
EfficientDet is a family of convolution-based neural networks for object detection. Specifically, this repository covers model D0. This model is based on [EfficientDet: Scalable and Efficient Object Detection](https://arxiv.org/abs/1911.09070). NVIDIA's implementation of EfficientDet-D0 is an optimized version of [TensorFlow Automl](https://github.com/google/automl/tree/master/efficientdet) implementation, leveraging mixed precision arithmetic on NVIDIA Volta, NVIDIA Turing, and the NVIDIA Ampere GPU architectures for faster training times while maintaining target accuracy.
The EfficientDet model covered in this repository is tested against each NGC monthly released container to ensure consistent accuracy and performance over time.
The major differences between the official implementation of the paper and our version of EfficientDet are as follows:
- Automatic mixed precision (AMP) training support
- Multi-node training support using [Horovod](https://github.com/horovod/horovod)
- XLA enabled for better performance
- Lightweight logging using [dllogger](https://github.com/NVIDIA/dllogger)
- [EfficientNet backbone](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Classification/ConvNets/efficientnet) implemented by NVIDIA
- Use [BatchNormalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization) instead of [SyncBatchNormalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/SyncBatchNormalization) for better performance (a sketch of this substitution follows this list)
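A minimal sketch of that substitution (illustrative only, not code from this repository):
```python
import tensorflow as tf

# Plain BatchNormalization uses per-replica batch statistics and avoids the
# cross-GPU synchronization cost of the synchronized variant.
bn = tf.keras.layers.BatchNormalization(momentum=0.99, epsilon=1e-3)
# The synchronized alternative would be:
# bn = tf.keras.layers.experimental.SyncBatchNormalization(momentum=0.99, epsilon=1e-3)
```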
These techniques/optimizations improve model performance and reduce training time, allowing you to perform more efficient object detection with no additional effort.
Other publicly available implementations of EfficientDet include:
- [Google's automl](https://github.com/google/automl/tree/master/efficientdet)
- [PyTorch version](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/Efficientdet)
### Model architecture
EfficientDet is a one-stage detector with the following architecture components:
- ImageNet-pretrained EfficientNet backbone
- Weighted bi-directional feature pyramid network (BiFPN)
- Bounding box and classification heads
- A compound scaling method that uniformly scales the resolution, depth, and width for all backbone, feature network, and box/class prediction networks at the same time
### Feature support matrix
The model supports the following features.
| **Feature** | **EfficientDet** |
|:---------:|:----------:|
|Horovod Multi-GPU training (NCCL)|Yes|
|Multi-GPU training|Yes|
|Multi-node training|Yes|
|XLA|Yes|
|AMP (Automatic Mixed Precision)|Yes|
#### Features
Horovod is used to implement efficient multi-GPU training with NCCL. It is also used for multi-node training. For details, refer to the example sources in this repository or the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).
AMP or Automatic Mixed Precision modifies computation graphs during runtime to support mixed precision training. A detailed explanation of mixed precision can be found below.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in NVIDIA Volta, and following with both the NVIDIA Turing and NVIDIA Ampere Architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on NVIDIA Volta, NVIDIA Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16. The loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, refer to [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
#### Enabling mixed precision
Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients.
In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models.
To enable mixed precision, you can simply add `--amp=True` to the training command. This will enable the following code:
```
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16', loss_scale='dynamic')
tf.keras.mixed_precision.experimental.set_policy(policy)
```
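For reference, a minimal sketch of the equivalent setup with the non-experimental API available in newer TensorFlow 2 releases (the repository itself uses the experimental policy shown above):
```python
import tensorflow as tf

# Compute in float16 while keeping variables in float32.
tf.keras.mixed_precision.set_global_policy("mixed_float16")
opt = tf.keras.optimizers.SGD(learning_rate=0.08)
# Wrap the optimizer for dynamic loss scaling.
opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
```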
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
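A minimal sketch (assuming TensorFlow 2.4 or later) of querying TF32 and disabling it explicitly, for example when debugging numerical differences:
```python
import tensorflow as tf

# TF32 is enabled by default on NVIDIA Ampere GPUs.
print(tf.config.experimental.tensor_float_32_execution_enabled())
# Force full-FP32 math instead of TF32.
tf.config.experimental.enable_tensor_float_32_execution(False)
```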
## Setup
The following sections list the requirements in order to start training the EfficientDet model.
### Requirements
This repository contains a `Dockerfile` that extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [TensorFlow 22.03-tf2-py3 NGC container or later](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow)
- Supported GPUs:
- [NVIDIA Volta architecture: 32GB](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture: 80GB](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
- [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running)
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
For multi-node, the sample provided in this repository requires [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) to be set up on a [SLURM](https://slurm.schedmd.com) cluster.
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the EfficientDet on the COCO 2017 dataset. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section.
### 1. Clone the repository
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/TensorFlow2/Detection/Efficientdet
```
### 2. Download and preprocess the dataset
To download COCO 2017 images and annotations and convert them to tfrecords, run the script as follows:
```bash
bash dataset/get_coco.sh
```
By default, the data is organized into the following structure:
```
</workspace/coco/>
train-*****-of-00256.tfrecord
val-*****-of-00032.tfrecord
```
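As a quick sanity check of the generated data, a minimal sketch (paths are illustrative) that counts the training records with `tf.data`:
```python
import tensorflow as tf

files = tf.io.gfile.glob("/workspace/coco/train-*-of-00256.tfrecord")
num_records = sum(1 for _ in tf.data.TFRecordDataset(files))
print(f"{len(files)} shards, {num_records} training examples")
```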
### 3. Build the EfficientDet TensorFlow NGC container
```
bash scripts/docker/build.sh
```
### 4. Start an interactive session in the NGC container to run training/inference
After you build the container image, you can start an interactive CLI session with
```
DATA=<path to coco tfrecords> BACKBONE_CKPT=<path to pretrained efficientnet checkpoint> bash scripts/docker/interactive.sh
```
Note: The `interactive.sh` script requires the location of the dataset and the pretrained checkpoint to be passed.
### 5. Start training
```
bash ./scripts/D0/convergence-{AMP, FP32, TF32}-{8, 32}x{V100-32G, A100-80G}.sh
```
The training scripts train EfficientDet-D0 and perform an evaluation on the COCO 2017 dataset. By default, the training script runs training with a standard configuration (DGX A100/DGX-1 V100, AMP/FP32/TF32, 300 epochs). Run one of the scripts in the `scripts/D0` directory using `bash scripts/D0/convergence-{AMP, FP32, TF32}-{8, 32}x{V100-32G, A100-80G}.sh`. Ensure the COCO-2017 tfrecords are mounted to `/workspace/coco` and the EfficientNet-B0 backbone weights are mounted to `/workspace/checkpoints`. The backbone checkpoint can be downloaded from [this](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/efficientnet_tf2_savedmodel_nohead_b0_amp_cosine) location.
After training, the logs are present in the model directory where the data is organized in the following structure:
```
</tmp/convergence-{AMP, FP32, TF32}-{8, 32}x{V100-32G, A100-80G}>
ema_weights *contains the ema checkpoints of the model, checkpointed after every 10 epochs of training*
checkpoint
emackpt-10.data-00000-of-00001
emackpt-10.index
emackpt-20.data-00000-of-00001
emackpt-20.index
...
emackpt-300.data-00000-of-00001
emackpt-300.index
emackpt-final *final savedmodel with ema weights which can be used for inference*
assets
variables
variables.data-00000-of-00001
variables.index
keras_metadata.pb
saved_model.pb
train *tensorboard logs*
events.out.tfevents.*
checkpoint
ckpt.data-00000-of-00001
ckpt.index
ckpt-final.data-00000-of-00001
ckpt-final.index
time_log.txt *dllogger logs*
train-<time_stamp>.log *training log file*
```
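A minimal sketch (assuming the layout above) of loading the exported `emackpt-final` SavedModel; the serving signature names are not guaranteed here, so the sketch only inspects them:
```python
import tensorflow as tf

model = tf.saved_model.load("/tmp/convergence-AMP-8xA100-80G/emackpt-final")
print(list(model.signatures.keys()))  # inspect the available serving signatures
```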
### 6. Start validation/evaluation
To run validation/evaluation for a standard configuration (DGX A100/DGX-1 V100, AMP/TF32/FP32, EfficientDet-D0),
run one of the evaluation scripts in `scripts/D0` directory using `bash scripts/D0/evaluate-{AMP, FP32, TF32}-{8, 32}x{A100-80G, V100-32G}.sh`.
The script requires:
- `CKPT`: the path to the checkpoint to evaluate. For example, `CKPT=/tmp/convergence-AMP-8xA100-80G/ema_weights/emackpt-300`
Evaluation command:
```bash
CKPT=<path to checkpoint> bash ./scripts/D0/evaluate-{AMP, FP32, TF32}-{8, 32}x{A100-80G, V100-32G}.sh
```
Ensure COCO-2017 is mounted in `/workspace/coco`.
### 7. Inference benchmark
The inference loop can be benchmarked by running the `scripts/D0/inference-benchmark.sh` script. The script requires:
- `BS`: the batch size to use for inference. For example, `BS=128`
- `AMP`: Boolean to enable/disable Automatic Mixed Precision
Inference benchmark command:
```bash
BS=<batch size> AMP=<True/False for Automatic Mixed Precision> bash scripts/D0/inference-benchmark.sh
```
### 8. Inference/Prediction
Model predictions can be obtained by running the `scripts/D0/inference.sh` script. This script reads a test image and annotates it by drawing boxes around the detected objects. The script requires:
- `MODEL_DIR`: a directory whose `checkpoint` file points to the latest checkpoint to use for inference. For example, `MODEL_DIR=/tmp/convergence-AMP-8xA100-80G/ema_weights`
Inference command:
```bash
MODEL_DIR=<path to trained model directory> bash scripts/D0/inference.sh
```
Note that the above script assumes that the test image is present in `testdata` and is named `img1.jpg`.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
Descriptions of the key scripts and folders are provided below.
- `model` and `object_detection` - Contains code to build individual components of the model such as the backbone, FPN, RPN, classification and bbox heads, and so on.
- `data` - Contains code to convert raw data to tfrecords.
- `dataset` - Contains code to build the data pipeline, such as the dataloader, transforms, and dataset builder.
- `scripts/` - Contains shell scripts to launch training and evaluation of the model and perform inference.
  - `D0/convergence-{AMP, TF32, FP32}-{8, 32}x{V100-32G, A100-80G}.sh` - Launches model training.
  - `D0/evaluate-{AMP, FP32, TF32}-{8, 32}x{A100-80G, V100-32G}.sh` - Performs inference and computes the mAP of the predictions.
  - `D0/inference.sh` - Performs inference on an image.
  - `D0/training-benchmark-{AMP, TF32, FP32}-{V100-32G, A100-80G}.sh` - Launches model training for 500 iterations to benchmark training.
  - `D0/inference-benchmark.sh` - Benchmarks inference.
  - `docker/` - Scripts to build the docker image and to start an interactive session.
- `utils/` - Contains utility components like default hyperparameters, checkpoint utils, training utils, and so on.
- `train.py` - End-to-end script to load data and build and train the model.
- `eval.py` - End-to-end script to load data and a checkpoint, perform inference, and compute mAP scores.
### Parameters
#### train.py script parameters
Important parameters for training are listed below with defaults.
### Command-line arguments
To display the full list of available options and their descriptions, use the `--helpshort` command-line option:
```
--amp: Enable mixed precision training
--backbone_init: Initialize backbone weights from
checkpoint in this directory
--batch_size: training local batch size
--benchmark: Train for a fixed number of steps for performance
--benchmark_steps: Train for these many steps to benchmark training
performance
--checkpoint_period: Save ema model weights after every X epochs for eval
--debug: Enable debug mode
--enable_map_parallelization:
Parallelize stateless map transformations in dataloader
--eval_samples: The number of samples for evaluation.
--hparams: Comma separated k=v pairs of hyperparameters or a module
containing attributes to use as hyperparameters
--log_filename: Filename for dllogger logs
--log_steps: Interval of steps between logging of batch level stats
--lr: Learning rate
--lr_tb: Log learning rate at each step to TB
--training_mode: Training mode (train/traineval/train300)
--model_dir: Location of model_dir
--model_name: Model name
--num_epochs: Number of epochs for training
--num_examples_per_epoch:
Number of examples in one epoch (coco is 117266).
Default is 120000.
--pretrained_ckpt: Start training from this EfficientDet checkpoint.
--seed: Random seed
--set_num_threads: Set inter-op and intra-op parallelism threads
--testdev_dir: COCO testdev dir. If not None, ignore val_json_file.
--time_history: Get time history
--training_file_pattern:
Glob for training data files, e.g., coco/train-*.tfrecord.
--use_fake_data: Use fake input
--use_xla: Use XLA
--val_file_pattern: Glob for eval tfrecords, e.g. coco/val-*.tfrecord.
--val_json_file: COCO validation JSON containing golden bounding boxes. If
None, use the ground truth from the dataloader. Ignored if
testdev_dir is not None.
--validate: Get validation loss after each epoch
--warmup_epochs: Number of warmup epochs
--warmup_value: Initial warmup value
```
The default `training_mode` (`traineval`) runs training along with evaluation. Note that evaluation starts only after 200 epochs of training, and the evaluation frequency can be set with `checkpoint_period=<every n epochs>`, which currently defaults to 10. In the `traineval` mode, the model stops training at 300 epochs to avoid overfitting. To run training without evaluation, set `training_mode` to `train`. In this mode, the ema checkpoints are stored under `model_dir/ema_weights/` every `checkpoint_period` epochs; training stops after 75% of the total number of epochs, and the last ema-weight checkpoint is evaluated.
For benchmarking only the training time over 300 epochs, set `training_mode` to `train300`; the model then trains for exactly 300 epochs without any evaluation.
### Getting the data
By default, the EfficientDet model is trained on the [COCO 2017](http://cocodataset.org/#download) dataset. This dataset comes with a training and validation set. Follow steps in the [Quick Start Guide](#quick-start-guide) to download and pre-process the dataset into tfrecord format.
### Training Process
Training is performed using the `train.py` script. The default parameters can be overridden by command-line arguments.
The training process can start from scratch or resume from a checkpoint.
By default, bash script `scripts/D0/convergence-{AMP, FP32, TF32}-8x{A100-80G, V100-32G}.sh` will start the training process from scratch with the following settings.
- Use 8 GPUs
- Saves checkpoints after every 10 epochs to `model_dir` which is `/tmp/convergence-{AMP, FP32, TF32}-8x{A100-80G, V100-32G}` folder
To resume from a checkpoint, just make sure that the `model_dir` stays the same and that the checkpoints saved are already present in it.
#### Multi-node
Multi-node runs can be launched on a Pyxis/enroot Slurm cluster (refer to [Requirements](#requirements)) with the `./scripts/D0/convergence-{AMP, FP32}-32xV100-32G.sub` script with the following command for a 4-node NVIDIA DGX V100 example:
```
checkpointdir=<path to efficientnet B0 pretrained checkpoint directory> datadir=<path to coco 2017 dataset in tfrecord format> sbatch -N 4 --ntasks-per-node=8 --ntasks=32 ./scripts/D0/convergence-{AMP, FP32}-32xV100-32G.sub
```
Note that the `./scripts/D0/convergence-{AMP, FP32}-32xV100-32G.sub` script is a starting point that has to be adapted to your environment. In particular, `--container-image` selects the container image to train with, `datadir` points to the location of the COCO-2017 data, and `checkpointdir` points to the pre-trained backbone (EfficientNet) weights.
Refer to the file's content to view the full list of variables to adjust for your system.
## Performance
### Benchmarking
Benchmarking can be performed for both training and inference; both scripts run the EfficientDet model. Specify whether benchmarking is performed in AMP, TF32, or FP32 by passing it as an argument to the benchmarking scripts.
#### Training performance benchmark
Training benchmarking can be performed by running the script:
```
NGPU=<number of GPUs> bash scripts/D0/training-benchmark-{AMP, TF32, FP32}-{V100-32G, A100-80G}.sh
```
To benchmark training on a single A100 80GB GPU, run:
```
bash scripts/D0/training-benchmark-{AMP, TF32}-1xA100-80G.sh
```
#### Inference performance benchmark
Inference benchmarking can be performed by running the script:
```
AMP=<TRUE/FALSE to enable/disable Automatic Mixed Precision> BS=<inference batch size> bash scripts/D0/inference-benchmark.sh
```
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training Accuracy Results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/D0/convergence-{AMP, TF32}-8xA100-80G.sh` training script in the 22.03-tf2 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs while evaluating every 10 epochs after 200 epochs of training until the 300th epoch is completed.
| GPUs | Image size | Precision | Local Batch size | BBOX mAP | Time to train | Time to train - speedup (TF32 to mixed precision)
| --| --| -- | -- | -- | -- | --
| 8 | 512 x 512 | TF32 | 104 | 34.53 | 8.5 hrs | -
| 8 | 512 x 512 | FP16 | 200 | 34.27 | 4.6 hrs | 1.84x
##### Training accuracy: NVIDIA DGX-1 (8x V100 32GB)
Our results were obtained by running the `scripts/D0/convergence-{AMP, FP32}-8xV100-32G.sh` training script in the 22.03-tf2 NGC container on NVIDIA DGX-1 with 8x V100 32GB GPUs with no intermediate evaluation.
| GPUs | Image size | Precision | Local Batch size | BBOX mAP | Time to train | Time to train - speedup (FP32 to mixed precision)
| --| --| -- | -- | -- | -- | --
| 8 | 512 x 512 | FP32 | 40 | 34.42 | 16.9 hrs | -
| 8 | 512 x 512 | FP16 | 64 | 34.45 | 14.3 hrs | 1.18x
##### Training accuracy: NVIDIA DGX-1 (32x V100 32GB)
Our results were obtained by running the `scripts/D0/convergence-{AMP, FP32}-32xV100-32G.sub` training script in the 22.03-tf2 NGC container on NVIDIA DGX-1 with 32x V100 32GB GPUs with no intermediate evaluation.
| GPUs | Image size | Precision | Local Batch size | BBOX mAP | Time to train | Time to train - speedup (FP32 to mixed precision)
| --| --| -- | -- | -- | -- | --
| 32 | 512 x 512 | FP32 | 40 | 34.14 | 5.6 hrs | -
| 32 | 512 x 512 | FP16 | 64 | 34.02 | 4.19 hrs | 1.33x
##### Training loss curves

Here, the loss is the weighted sum of the losses on the classification head and the bounding box head.
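A minimal sketch of that composition; the `box_loss_weight` default of 50.0 follows the reference automl configuration and should be treated as an assumption here:
```python
def detection_loss(cls_loss, box_loss, box_loss_weight=50.0):
    # Weighted sum of the classification loss and the box regression loss.
    return cls_loss + box_loss_weight * box_loss
```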
##### Training Stability Test
The following tables compare mAP scores across five different training runs with different seeds. The runs showcase consistent convergence on all five seeds with very little deviation.
| **Config** | **Seed 1** | **Seed 2** | **Seed 3** | **Seed 4** | **Seed 5** | **Mean** | **Standard Deviation** |
| --- | --- | ----- | ----- | --- | --- | ----- | ----- |
| 8 GPUs, final AP BBox | 34.38 | 34.56 | 34.3 | 34.34 | 34.4 | 34.39 | 0.1 |
#### Training Performance Results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/D0/training-benchmark-{AMP, TF32}-A100-80G.sh` training script in the 22.03-tf2 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers in images per second were averaged over an entire training epoch. The number of GPUs used to benchmark can be set as `NGPU=<4/8>`. For single-GPU benchmarking, run `scripts/D0/training-benchmark-{AMP, TF32}-1xA100-80G.sh`.
| GPUs | Throughput - TF32 (BS=104) | Throughput - mixed precision (BS=200) | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision
| --- | ----- | ----- | --- | --- | ----- |
| 1 | 162 | 397 | 2.45 | 1 | 1 |
| 8 | 1266 | 2711 | 2.14 | 7.81 | 6.82 |
##### Training performance: NVIDIA DGX-1 (8x V100 32GB)
Our results were obtained by running the `scripts/D0/training-benchmark-{AMP, FP32}-V100-32G.sh` training script in the 22.03-tf2 NGC container on NVIDIA DGX-1 with (8x V100 32GB) GPUs. Performance numbers in images per second were averaged over an entire training epoch. The number of GPUs used to benchmark can be set as `NGPU=<1/4/8>`.
| GPUs | Throughput - FP32 (BS=40) | Throughput - mixed precision (BS=64) | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision |
| --- | ----- | ----- | --- | --- | ----- |
| 1 | 113 | 232 | 2.05 | 1 | 1 |
| 8 | 645 | 777 | 1.2 | 5.7 | 3.34 |
To achieve similar results, follow the steps in the [Quick Start Guide](#quick-start-guide).
Note: The dataloader is a performance bottleneck for this model, so training throughput could be higher if the dataloader were optimized further.
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running the `scripts/D0/inference-benchmark.sh` inference script in the 22.03-tf2 NGC container on an NVIDIA DGX A100 (1x A100 80GB) GPU. The image resolution is 512 x 512.
FP16 Inference Latency
| Batch size | Throughput Avg | Latency Avg (ms) | Latency 90% (ms) | Latency 95% (ms) | Latency 99% (ms)
| --- | ----- | ----- | ----- | ----- | ----- |
| 1 | 38 | 26.31 | 26.27 | 26.29 | 26.31 |
| 2 | 40 | 49.75 | 49.68 | 49.71 | 49.74 |
| 4 | 80 | 50.12 | 50.06 | 50.08 | 50.11 |
| 8 | 153 | 52.16 | 52.09 | 52.12 | 52.15 |
| 16 | 276 | 57.83 | 57.77 | 57.79 | 57.81 |
| 32 | 465 | 68.75 | 68.69 | 68.72 | 68.74 |
| 64 | 706 | 90.63 | 90.56 | 90.59 | 90.62 |
| 128 | 791 | 161.65 | 160.94 | 161.08 | 161.14 |
| 256 | 858 | 298.33 | 296.1 | 296.62 | 297.76 |
TF32 Inference Latency
| Batch size | Throughput Avg | Latency Avg (ms) | Latency 90% (ms) | Latency 95% (ms) | Latency 99% (ms)
| --- | ----- | ----- | ----- | ----- | ----- |
| 1 | 38 | 26.09 | 26 | 26.03 | 26.07 |
| 2 | 40 | 49.94 | 49.84 | 49.88 | 49.91 |
| 4 | 78 | 50.98 | 50.91 | 50.94 | 50.96 |
| 8 | 144 | 55.21 | 55.16 | 55.19 | 55.21 |
| 16 | 250 | 63.76 | 63.69 | 63.72 | 63.75 |
| 32 | 394 | 81.06 | 80.97 | 81 | 81.04 |
| 64 | 563 | 113.54 | 113.44 | 113.47 | 113.51 |
| 128 | 623 | 205.33 | 205.06 | 205.16 | 205.28 |
To achieve similar results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 32GB)
Our results were obtained by running the `scripts/D0/inference-benchmark.sh` inference script in the 22.03-tf2 NGC container on NVIDIA DGX-1 with 1x V100 32GB GPU. The image resolution is 512 x 512.
FP16 Inference Latency
| Batch size | Throughput Avg | Latency Avg (ms) | Latency 90% (ms) | Latency 95% (ms) | Latency 99% (ms)
| --- | ----- | ----- | ----- | ----- | ----- |
| 1 | 35 | 27.84 | 27.67 | 27.74 | 27.82 |
| 2 | 40 | 49.81 | 49.42 | 49.62 | 49.77 |
| 4 | 81 | 49.35 | 49.3 | 49.32 | 49.34 |
| 8 | 146 | 54.51 | 54.44 | 54.47 | 54.5 |
| 16 | 245 | 65.07 | 65.01 | 65.04 | 65.06 |
| 32 | 366 | 87.24 | 87.1 | 87.16 | 87.22 |
| 64 | 477 | 134.09 | 133.98 | 134.02 | 134.07 |
| 128 | 497 | 257.39 | 257.09 | 257.19 | 257.34 |
FP32 Inference Latency
| Batch size | Throughput Avg | Latency Avg (ms) | Latency 90% (ms) | Latency 95% (ms) | Latency 99% (ms)
| --- | ----- | ----- | ----- | ----- | ----- |
| 1 | 36 | 27.21 | 27.02 | 27.11 | 27.19 |
| 2 | 39 | 51.04 | 50.81 | 50.91 | 51.01 |
| 4 | 78 | 51.23 | 51.19 | 51.21 | 51.22 |
| 8 | 135 | 59.06 | 58.98 | 59.02 | 59.06 |
| 16 | 214 | 74.73 | 74.64 | 74.68 | 74.71 |
| 32 | 305 | 104.76 | 104.67 | 104.72 | 104.76 |
| 64 | 374 | 171.08 | 170.92 | 170.98 | 171.05 |
| 128 | 385 | 332.11 | 331.81 | 331.92 | 332.06 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
### Changelog
May 2022
- Initial Release
### Known Issues
The dataloader is a bottleneck during training; further performance gains require improving the dataloader.
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/jasper | jasper | speech_utils | #!/usr/bin/python
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import librosa
import soundfile as sf
import math
from os import system
import numpy as np
from tensorrtserver.api import *
import tensorrtserver.api.model_config_pb2 as model_config
import grpc
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None}
def model_dtype_to_np(model_dtype):
if model_dtype == model_config.TYPE_BOOL:
return np.bool_
elif model_dtype == model_config.TYPE_INT8:
return np.int8
elif model_dtype == model_config.TYPE_INT16:
return np.int16
elif model_dtype == model_config.TYPE_INT32:
return np.int32
elif model_dtype == model_config.TYPE_INT64:
return np.int64
elif model_dtype == model_config.TYPE_UINT8:
return np.uint8
elif model_dtype == model_config.TYPE_UINT16:
return np.uint16
elif model_dtype == model_config.TYPE_UINT32:
return np.uint32
elif model_dtype == model_config.TYPE_FP16:
return np.float16
elif model_dtype == model_config.TYPE_FP32:
return np.float32
elif model_dtype == model_config.TYPE_FP64:
return np.float64
elif model_dtype == model_config.TYPE_STRING:
return np.dtype(object)
return None
def ctc_decoder_predictions_tensor(prediction_cpu_tensor, batch_size, labels):
"""
Takes output of greedy ctc decoder and performs ctc decoding algorithm to
remove duplicates and special symbol. Returns prediction
Args:
tensor: model output tensor
label: A list of labels
Returns:
prediction
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = dict([(i, labels[i]) for i in range(len(labels))])
# iterate over batch
prediction_cpu_tensor = prediction_cpu_tensor.reshape((batch_size, int(prediction_cpu_tensor.size/batch_size)))
for ind in range(batch_size):
prediction = prediction_cpu_tensor[ind].tolist()
# CTC decoding procedure
decoded_prediction = []
previous = len(labels) - 1 # id of a blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
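# Illustrative example of the decoding rule above: with
# labels = [" ", "a", "b", "c", "<BLANK>"] (blank_id == 4), a frame-level
# argmax sequence [1, 1, 4, 1, 2, 2, 4] collapses to "aab": repeated
# symbols are merged unless separated by a blank, and blanks are dropped.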
class SpeechClient(object):
def __init__(self, url, protocol, model_name, model_version, batch_size,
model_platform=None, verbose=False,
mode="batch",
from_features=True):
self.model_name = model_name
self.model_version = model_version
self.verbose = verbose
self.batch_size = batch_size
self.transpose_audio_features = False
self.grpc_stub = None
self.ctx = None
self.correlation_id = 0
self.first_run = True
if mode == "streaming" or mode == "asynchronous":
self.correlation_id = 1
self.buffer = []
self.ctx = InferContext(url, protocol, model_name, model_version,
verbose, self.correlation_id, False)
server_ctx = ServerStatusContext(url, protocol, model_name,
verbose)
server_status = server_ctx.get_server_status()
self.audio_signals_name, self.num_samples_name, self.transcripts_name, \
self.audio_signals_type, self.num_samples_type, self.transcripts_type = self.parse_model(server_status, model_name,
batch_size, model_platform, verbose)
self.labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"]
def postprocess(self, results, labels):
if len(results) != 1:
raise Exception("expected 1 result, got {}".format(len(results)))
transcript_values = results['TRANSCRIPT']
for transcript, filename in zip(transcript_values,
labels):
hypotheses = ctc_decoder_predictions_tensor(transcript, self.batch_size, self.labels)
print('---')
print('File: ', filename)
print("Final transcript: ", hypotheses)
print('---')
return hypotheses
def check_num_samples(self, num_samples):
if num_samples.data_type != model_config.TYPE_UINT32 and num_samples.data_type != model_config.TYPE_INT32:
raise Exception(
"expecting num_samples datatype to be TYPE_UINT32/TYPE_INT32, "
"model '" + model_name + "' output type is " +
model_config.DataType.Name(num_samples.data_type))
if len(num_samples.dims) != 1:
raise Exception("Expecting num_samples to have 1 dimension, "
"model '{}' num_samples has {}".format(
model_name,len(num_samples.dims)))
def parse_model(self, server_status,
model_name, batch_size,
model_platform=None, verbose=False):
"""
Check the configuration of the ensemble model
"""
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
self.model_platform = model_platform
# Inputs are:
# 1) audio_signal: raw audio samples [num_samples]
# 2) sample_rate: sample rate of audio
# 3) num_samples: length of audio
if len(config.input) < 2:
raise Exception(
"expecting 2-3 inputs, got {}".format(len(config.input)))
# Outputs are:
# 1) transcripts: candidate transcripts
if len(config.output) != 1:
raise Exception(
"expecting 1 output, got {}".format(len(config.output)))
audio_signal = config.input[0]
if len(config.input) > 1:
num_samples = config.input[1]
self.check_num_samples(num_samples)
transcripts = config.output[0]
expected_audio_signal_dim = 1
expected_audio_signal_type = model_config.TYPE_FP32
if audio_signal.data_type != expected_audio_signal_type:
raise Exception("expecting audio_signal datatype to be " +
model_config.DataType.Name(
expected_audio_signal_type) +
"model '" + model_name + "' output type is " +
model_config.DataType.Name(audio_signal.data_type))
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# image instance is inferred at a time).
max_batch_size = config.max_batch_size
if max_batch_size == 0:
if batch_size != 1:
raise Exception(
"batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception(
"expecting batch size <= {} for model {}".format(
max_batch_size, model_name))
if len(audio_signal.dims) != expected_audio_signal_dim:
raise Exception("Expecting audio signal to have {} dimensions, "
"model '{}' audio_signal has {}".format(
expected_audio_signal_dim,
model_name,
len(audio_signal.dims)))
return (audio_signal.name, num_samples.name, transcripts.name,
model_dtype_to_np(audio_signal.data_type),
model_dtype_to_np(num_samples.data_type),
model_dtype_to_np(transcripts.data_type),
)
def update_audio_request(self, request, audio_generator):
for audio_signal, sample_rate, start, end in audio_generator:
# Delete the current inputs
input_batch = [audio_signal.astype(self.audio_signals_type)]
num_samples_batch = audio_signal.shape[0]
num_samples_batch = [np.asarray([num_samples_batch],
dtype=self.num_samples_type)]
flags = InferRequestHeader.FLAG_NONE
input_batch[0] = np.expand_dims(input_batch[0], axis=0)
audio_bytes = input_batch[0].tobytes()
num_samples_bytes = num_samples_batch[0].tobytes()
request.meta_data.input[0].dims[0] = audio_signal.shape[0]
request.meta_data.input[0].batch_byte_size = len(audio_bytes)
request.meta_data.input[1].dims[0] = 1
request.meta_data.input[1].batch_byte_size = len(num_samples_bytes)
if start:
request.meta_data.flags = flags | \
InferRequestHeader.FLAG_SEQUENCE_START
else:
request.meta_data.flags = flags
# Send request with audio signal
del request.raw_input[:]
request.raw_input.extend([audio_bytes])
request.raw_input.extend([num_samples_bytes])
yield request
# If end, send empty request to flush out remaining audio
if end:
request.meta_data.flags = flags | \
InferRequestHeader.FLAG_SEQUENCE_END
zero_bytes = np.zeros(shape=input_batch[0].shape,
dtype=input_batch[0].dtype).tobytes()
del request.raw_input[:]
request.raw_input.extend([zero_bytes])
request.raw_input.extend([num_samples_bytes])
yield request
def recognize(self, audio_signal, filenames):
# Send requests of FLAGS.batch_size audio signals. If the number of
# audios isn't an exact multiple of FLAGS.batch_size then just
# start over with the first audio until the batch is filled.
flags = InferRequestHeader.FLAG_NONE
flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
input_batch = []
input_filenames = []
max_num_samples_batch = 0
for idx in range(self.batch_size):
input_batch.append(audio_signal[idx].astype(
self.audio_signals_type))
input_filenames.append(filenames[idx])
num_samples = audio_signal[idx].shape[0]
if (num_samples > max_num_samples_batch):
max_num_samples_batch = num_samples
for idx in range(self.batch_size):
num_samples = input_batch[idx].shape[0]
print("num_samples : ", num_samples)
# input_batch[idx] = np.pad(input_batch[idx],
# ((0,
# max_num_samples_batch -
# num_samples)),
# mode='constant')
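# Pad shorter signals with Gaussian noise matching the signal's mean and
# standard deviation (instead of the zero-padding commented out above);
# this avoids a hard silent tail in the padded region.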
mean = np.mean(input_batch[idx])
std_var = np.std(input_batch[idx])
gauss_noise = np.random.normal(
mean,std_var,
max_num_samples_batch-num_samples)
input_batch[idx]= np.concatenate(
(input_batch[idx], gauss_noise.astype(
self.audio_signals_type)))
max_num_samples_batch = np.asarray([max_num_samples_batch],
dtype=self.num_samples_type)
num_samples_batch = [max_num_samples_batch] * self.batch_size
#print(num_samples_batch)
#print(input_batch)
#print(input_sample_rates)
# Send request
print("Sending request to transcribe file(s):", ",".join(
input_filenames))
if (self.model_platform == "obsolete_pyt"):
result = self.ctx.run(
{self.audio_signals_name: input_batch,
self.num_samples_name: num_samples_batch},
{self.transcripts_name: InferContext.ResultFormat.RAW},
self.batch_size, flags)
else:
result = self.ctx.run(
{self.audio_signals_name: input_batch,
self.num_samples_name: num_samples_batch},
{self.transcripts_name: InferContext.ResultFormat.RAW},
self.batch_size, flags)
hypotheses = self.postprocess(result, input_filenames)
return hypotheses
def preemphasis(signal, coeff=0.97):
# First-order high-pass (pre-emphasis) filter: y[t] = x[t] - coeff * x[t-1].
return np.append(signal[0], signal[1:] - coeff * signal[:-1])
def normalize_signal(signal, gain=None):
"""
Normalize float32 signal to [-1, 1] range
"""
if gain is None:
gain = 1.0 / (np.max(np.abs(signal)) + 1e-5)
return signal * gain
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, target_sr=16000, trim=False,
trim_db=60):
"""Create audio segment from samples.
Samples are converted to float32 internally, with integers scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.core.resample(samples, sample_rate, target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@classmethod
def from_file(cls, filename, target_sr=16000, int_values=False, offset=0,
duration=0, trim=False):
"""
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: numpy array of samples
"""
with sf.SoundFile(filename, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
samples = samples.transpose()
return cls(samples, sample_rate, target_sr=target_sr, trim=trim)
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
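    # Usage sketch (hypothetical path): load a file resampled to 16 kHz and
    # read its mono float32 samples.
    #
    #   segment = AudioSegment.from_file("sample.wav", target_sr=16000)
    #   samples, sr = segment.samples, segment.sample_rate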
# define our clear function
def clear_screen():
_ = system('clear')
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda | cuda | rpn_generate_proposals | /**
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
namespace rpn
{
namespace
{
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. Since work is assigned to SMs at the
// granularity of a block, 128 is chosen to allow utilizing more SMs for
// smaller input sizes.
// 1D grid
constexpr int CUDA_NUM_THREADS = 128;
constexpr int MAXIMUM_NUM_BLOCKS = 4096;
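// GetBlocks returns ceil(N / CUDA_NUM_THREADS) clamped to [1, MAXIMUM_NUM_BLOCKS];
// e.g. GetBlocks(1000) == 8, while very large N saturates at 4096 blocks.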
inline int GetBlocks(const int N)
{
return std::max(
std::min(
(N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
/**
 * d_sorted_scores_keys -- indexes into _original_ scores
 * nboxes_to_generate -- pre_nms_topn
 */
__global__ void GeneratePreNMSUprightBoxesKernel(
const long *d_sorted_scores_keys,
const int nboxes_to_generate,
const float *d_bbox_deltas, // [N, A*4, H, W]
const float4 *d_anchors,
const int H,
const int W,
const int K, // K = H*W
const int A,
const int KA, // KA = K*A
const float min_size,
const float *d_img_info_vec,
const int num_images,
const float bbox_xform_clip,
const bool correct_transform,
float4 *d_out_boxes,
const int prenms_nboxes, // leading dimension of out_boxes
float *d_inout_scores, // [N, A, H, W]
uint8_t *d_boxes_keep_flags,
bool is_channels_last)
{
// Going to generate pre_nms_nboxes boxes per image
for (int ibox = blockIdx.x * blockDim.x + threadIdx.x; ibox < nboxes_to_generate;
ibox += blockDim.x * gridDim.x)
{
for (int image_index = blockIdx.y * blockDim.y + threadIdx.y; image_index < num_images;
image_index += blockDim.y * gridDim.y)
{
// box_conv_index : # of the same box, but indexed in
// the scores from the conv layer, of shape (A,H,W)
// the num_images dimension was already removed
// box_conv_index = a*K + h*W + w
// Note: PyT code takes topK, so need to adjust the indexing for multi-image
// box_conv_index is _local_ to the image_index, need to adjust into global arrays
const int box_conv_index = d_sorted_scores_keys[image_index * prenms_nboxes + ibox];
// We want to decompose box_conv_index in (a,h,w)
// such as box_conv_index = a*K + h*W + w
// (avoiding modulos in the process)
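      // Worked example: with H = 50, W = 80 (so K = 4000), box_conv_index =
      // 8242 decomposes into a = 2, h = 3, w = 2, since 8242 = 2*4000 + 3*80 + 2.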
int remaining = box_conv_index;
const int dA = K; // stride of A
const int a = remaining / dA;
remaining -= a * dA;
const int dH = W; // stride of H
const int h = remaining / dH;
remaining -= h * dH;
const int w = remaining; // dW = 1
int deltas_idx = (is_channels_last)? image_index * (KA * 4) + h * W * A * 4 + w * A * 4 + a * 4 : image_index * (KA * 4) + a * 4 * K + h * W + w;
int a_idx = h * W * A + w * A + a;
// Order of anchors is [N, H, W, A, 4]
const float4 anchor = d_anchors[image_index * KA + a_idx];
// x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w)
float x1 = anchor.x;
float x2 = anchor.z;
float y1 = anchor.y;
float y2 = anchor.w;
// Deltas for that box
// Deltas of shape (num_images,4*A,K)
// We're going to compute 4 scattered reads
// better than the alternative, ie transposing the complete deltas
// array first
const float dx = d_bbox_deltas[deltas_idx];
// Stride of K between each dimension
int stride = (is_channels_last)? 1: K;
deltas_idx += stride;
const float dy = d_bbox_deltas[deltas_idx];
deltas_idx += stride;
float dw = d_bbox_deltas[deltas_idx];
deltas_idx += stride;
float dh = d_bbox_deltas[deltas_idx];
// Upper bound on dw,dh
dw = fmin(dw, bbox_xform_clip);
dh = fmin(dh, bbox_xform_clip);
// Applying the deltas
float width = x2 - x1 + 1.0f;
const float ctr_x = x1 + 0.5f * width;
const float pred_ctr_x = ctr_x + width * dx;
const float pred_w = width * expf(dw);
x1 = pred_ctr_x - 0.5f * pred_w;
x2 = pred_ctr_x + 0.5f * pred_w;
float height = y2 - y1 + 1.0f;
const float ctr_y = y1 + 0.5f * height;
const float pred_ctr_y = ctr_y + height * dy;
const float pred_h = height * expf(dh);
y1 = pred_ctr_y - 0.5f * pred_h;
y2 = pred_ctr_y + 0.5f * pred_h;
if (correct_transform)
{
x2 -= 1.0f;
y2 -= 1.0f;
}
// End of box_coder.decode(..) part
// Clipping box to image
// p = _clip_box_to_image(proposal, height, width)
const float img_height = d_img_info_vec[2 * image_index + 1];
const float img_width = d_img_info_vec[2 * image_index + 0];
const float min_size_scaled = min_size;
x1 = fmax(fmin(x1, img_width - 1.0f), 0.0f);
y1 = fmax(fmin(y1, img_height - 1.0f), 0.0f);
x2 = fmax(fmin(x2, img_width - 1.0f), 0.0f);
y2 = fmax(fmin(y2, img_height - 1.0f), 0.0f);
// Filter boxes
// Removing boxes with one dim < min_size
// (center of box is in image, because of previous step)
// keep = _filter_boxes(p, self.min_size, im_shape)
width = x2 - x1 + 1.0f;
height = y2 - y1 + 1.0f;
bool keep_box = fmin(width, height) >= min_size_scaled;
// We are not deleting the box right now even if !keep_box
// we want to keep the relative order of the elements stable
// we'll do it in such a way later
// d_boxes_keep_flags size: (num_images,prenms_nboxes)
// d_out_boxes size: (num_images,prenms_nboxes)
const int out_index = image_index * prenms_nboxes + ibox;
d_boxes_keep_flags[out_index] = keep_box;
d_out_boxes[out_index] = {x1, y1, x2, y2};
}
}
}
} // namespace
/**
* Generate boxes associated to topN pre-NMS scores
*/
std::vector<at::Tensor> GeneratePreNMSUprightBoxes(
const int num_images,
const int A,
const int H,
const int W,
at::Tensor &sorted_indices, // topK sorted pre_nms_topn indices
at::Tensor &sorted_scores, // topK sorted pre_nms_topn scores [N, A, H, W]
at::Tensor &bbox_deltas, // [N, A*4, H, W] (full, unsorted / sliced)
at::Tensor &anchors, // input (full, unsorted, unsliced)
at::Tensor &image_shapes, // (h, w) of images
const int pre_nms_nboxes,
const int rpn_min_size,
const float bbox_xform_clip_default,
const bool correct_transform_coords,
bool is_channels_last)
{
// constants
constexpr int box_dim = 4;
const int K = H * W;
// temp Tensors
at::Tensor boxes = at::zeros({num_images, box_dim * pre_nms_nboxes}, sorted_scores.options()).to(at::kFloat);
at::Tensor boxes_keep_flags = at::empty({num_images, pre_nms_nboxes}, sorted_scores.options()).to(at::kByte);
boxes_keep_flags.zero_();
auto stream = at::cuda::getCurrentCUDAStream().stream();
// Call kernel
GeneratePreNMSUprightBoxesKernel<<<
      dim3(GetBlocks(pre_nms_nboxes), num_images), // 2D grid: x over boxes, y over images
CUDA_NUM_THREADS, // blockDim.y == 1
0, stream>>>(
sorted_indices.data_ptr<long>(),
pre_nms_nboxes,
bbox_deltas.data_ptr<float>(),
reinterpret_cast<float4 *>(anchors.data_ptr<float>()),
H,
W,
K,
A,
K * A,
rpn_min_size,
image_shapes.data_ptr<float>(), // image size vec
num_images,
bbox_xform_clip_default, // utils::BBOX_XFORM_CLIP_DEFAULT
correct_transform_coords,
reinterpret_cast<float4 *>(boxes.data_ptr<float>()),
pre_nms_nboxes,
sorted_scores.data_ptr<float>(),
boxes_keep_flags.data_ptr<uint8_t>(),
is_channels_last);
C10_CUDA_CHECK(cudaGetLastError());
return std::vector<at::Tensor>{boxes, sorted_scores, boxes_keep_flags};
}
} // namespace rpn
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model | model | mask_rcnn | import tensorflow as tf
from mrcnn_tf2.model import anchors
from mrcnn_tf2.model.losses import MaskRCNNLoss, FastRCNNLoss, RPNLoss
from mrcnn_tf2.model.models.fpn import FPNNetwork
from mrcnn_tf2.model.models.heads import RPNHead, BoxHead, MaskHead
from mrcnn_tf2.model.models.resnet50 import ResNet50
from mrcnn_tf2.ops import roi_ops, spatial_transform_ops, postprocess_ops, training_ops
class MaskRCNN(tf.keras.Model):
def __init__(self, params, name='mrcnn', trainable=True, *args, **kwargs):
super().__init__(name=name, trainable=trainable, *args, **kwargs)
self._params = params
self.backbone = ResNet50()
self.fpn = FPNNetwork(
min_level=self._params.min_level,
max_level=self._params.max_level,
trainable=trainable
)
self.rpn_head = RPNHead(
name="rpn_head",
            num_anchors=len(self._params.aspect_ratios) * self._params.num_scales,
trainable=trainable
)
self.box_head = BoxHead(
num_classes=self._params.num_classes,
mlp_head_dim=self._params.fast_rcnn_mlp_head_dim,
trainable=trainable
)
self.mask_head = MaskHead(
num_classes=self._params.num_classes,
mrcnn_resolution=self._params.mrcnn_resolution,
trainable=trainable,
name="mask_head"
)
self.mask_rcnn_loss = MaskRCNNLoss()
self.fast_rcnn_loss = FastRCNNLoss(
num_classes=self._params.num_classes
)
self.rpn_loss = RPNLoss(
batch_size=self._params.train_batch_size,
rpn_batch_size_per_im=self._params.rpn_batch_size_per_im,
min_level=self._params.min_level,
max_level=self._params.max_level
)
def call(self, inputs, training=None, mask=None):
batch_size, image_height, image_width, _ = inputs['images'].get_shape().as_list()
if 'source_ids' not in inputs:
inputs['source_ids'] = -1 * tf.ones([batch_size], dtype=tf.float32)
outputs = dict(inputs)
all_anchors = anchors.Anchors(self._params.min_level, self._params.max_level,
self._params.num_scales, self._params.aspect_ratios,
self._params.anchor_scale,
(image_height, image_width))
backbone_feats = self.backbone(inputs['images'], training=training)
fpn_feats = self.fpn(backbone_feats, training=training)
outputs.update({'fpn_features': fpn_feats})
def rpn_head_fn(features, min_level=2, max_level=6):
"""Region Proposal Network (RPN) for Mask-RCNN."""
scores_outputs = dict()
box_outputs = dict()
for level in range(min_level, max_level + 1):
scores_outputs[level], box_outputs[level] = self.rpn_head(features[level], training=training)
return scores_outputs, box_outputs
rpn_score_outputs, rpn_box_outputs = rpn_head_fn(
features=fpn_feats,
min_level=self._params.min_level,
max_level=self._params.max_level
)
if training:
rpn_pre_nms_topn = self._params.train_rpn_pre_nms_topn
rpn_post_nms_topn = self._params.train_rpn_post_nms_topn
rpn_nms_threshold = self._params.train_rpn_nms_threshold
else:
rpn_pre_nms_topn = self._params.test_rpn_pre_nms_topn
rpn_post_nms_topn = self._params.test_rpn_post_nms_topn
rpn_nms_threshold = self._params.test_rpn_nms_thresh
rpn_box_scores, rpn_box_rois = roi_ops.multilevel_propose_rois(
scores_outputs=rpn_score_outputs,
box_outputs=rpn_box_outputs,
all_anchors=all_anchors,
image_info=inputs['image_info'],
rpn_pre_nms_topn=rpn_pre_nms_topn,
rpn_post_nms_topn=rpn_post_nms_topn,
rpn_nms_threshold=rpn_nms_threshold,
rpn_min_size=self._params.rpn_min_size,
bbox_reg_weights=None
)
rpn_box_rois = tf.cast(rpn_box_rois, dtype=tf.float32)
if training:
rpn_box_rois = tf.stop_gradient(rpn_box_rois)
rpn_box_scores = tf.stop_gradient(rpn_box_scores) # TODO Jonathan: Unused => Shall keep ?
# Sampling
box_targets, class_targets, rpn_box_rois, proposal_to_label_map = training_ops.proposal_label_op(
rpn_box_rois,
inputs['gt_boxes'],
inputs['gt_classes'],
batch_size_per_im=self._params.batch_size_per_im,
fg_fraction=self._params.fg_fraction,
fg_thresh=self._params.fg_thresh,
bg_thresh_hi=self._params.bg_thresh_hi,
bg_thresh_lo=self._params.bg_thresh_lo
)
# Performs multi-level RoIAlign.
box_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
features=fpn_feats,
boxes=rpn_box_rois,
output_size=7,
training=training
)
class_outputs, box_outputs, _ = self.box_head(inputs=box_roi_features)
if not training:
detections = postprocess_ops.generate_detections_gpu(
class_outputs=class_outputs,
box_outputs=box_outputs,
anchor_boxes=rpn_box_rois,
image_info=inputs['image_info'],
pre_nms_num_detections=self._params.test_rpn_post_nms_topn,
post_nms_num_detections=self._params.test_detections_per_image,
nms_threshold=self._params.test_nms,
bbox_reg_weights=self._params.bbox_reg_weights
)
outputs.update({
'num_detections': detections[0],
'detection_boxes': detections[1],
'detection_classes': detections[2],
'detection_scores': detections[3],
})
else: # is training
encoded_box_targets = training_ops.encode_box_targets(
boxes=rpn_box_rois,
gt_boxes=box_targets,
gt_labels=class_targets,
bbox_reg_weights=self._params.bbox_reg_weights
)
outputs.update({
'rpn_score_outputs': rpn_score_outputs,
'rpn_box_outputs': rpn_box_outputs,
'class_outputs': class_outputs,
'box_outputs': box_outputs,
'class_targets': class_targets,
'box_targets': encoded_box_targets,
'box_rois': rpn_box_rois,
})
# Faster-RCNN mode.
if not self._params.include_mask:
return outputs
# Mask sampling
if not training:
selected_box_rois = outputs['detection_boxes']
class_indices = outputs['detection_classes']
else:
selected_class_targets, selected_box_targets, \
selected_box_rois, proposal_to_label_map = training_ops.select_fg_for_masks(
class_targets=class_targets,
box_targets=box_targets,
boxes=rpn_box_rois,
proposal_to_label_map=proposal_to_label_map,
max_num_fg=int(self._params.batch_size_per_im * self._params.fg_fraction)
)
class_indices = tf.cast(selected_class_targets, dtype=tf.int32)
mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
features=fpn_feats,
boxes=selected_box_rois,
output_size=14,
training=training
)
mask_outputs = self.mask_head(
inputs=(mask_roi_features, class_indices),
training=training
)
if training:
mask_targets = training_ops.get_mask_targets(
fg_boxes=selected_box_rois,
fg_proposal_to_label_map=proposal_to_label_map,
fg_box_targets=selected_box_targets,
mask_gt_labels=inputs['cropped_gt_masks'],
output_size=self._params.mrcnn_resolution
)
outputs.update({
'mask_outputs': mask_outputs,
'mask_targets': mask_targets,
'selected_class_targets': selected_class_targets,
})
else:
outputs.update({
'detection_masks': tf.nn.sigmoid(mask_outputs),
})
if training:
self._add_losses(outputs)
# filter out only the needed outputs
model_outputs = [
'source_ids', 'image_info',
'num_detections', 'detection_boxes',
'detection_classes', 'detection_scores',
'detection_masks'
]
return {
name: tf.identity(tensor, name=name)
for name, tensor in outputs.items()
if name in model_outputs
}
def _add_losses(self, model_outputs):
mask_rcnn_loss = self.mask_rcnn_loss(model_outputs)
mask_rcnn_loss *= self._params.mrcnn_weight_loss_mask
self.add_loss(mask_rcnn_loss)
self.add_metric(mask_rcnn_loss, name='mask_rcnn_loss')
fast_rcnn_class_loss, fast_rcnn_box_loss = self.fast_rcnn_loss(model_outputs)
fast_rcnn_box_loss *= self._params.fast_rcnn_box_loss_weight
self.add_loss(fast_rcnn_box_loss)
self.add_metric(fast_rcnn_box_loss, name='fast_rcnn_box_loss')
self.add_loss(fast_rcnn_class_loss)
self.add_metric(fast_rcnn_class_loss, name='fast_rcnn_class_loss')
rpn_score_loss, rpn_box_loss = self.rpn_loss(model_outputs)
rpn_box_loss *= self._params.rpn_box_loss_weight
self.add_loss(rpn_box_loss)
self.add_metric(rpn_box_loss, name='rpn_box_loss')
self.add_loss(rpn_score_loss)
self.add_metric(rpn_score_loss, name='rpn_score_loss')
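        # L2-regularize all trainable variables except batch-norm parameters
        # and biases; casting to float32 keeps the loss numerically stable
        # when training with mixed precision.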
l2_regularization_loss = tf.add_n([
tf.nn.l2_loss(tf.cast(v, dtype=tf.float32))
for v in self.trainable_variables
if not any([pattern in v.name for pattern in ["batch_normalization", "bias", "beta"]])
])
l2_regularization_loss *= self._params.l2_weight_decay
self.add_loss(l2_regularization_loss)
self.add_metric(l2_regularization_loss, name='l2_regularization_loss')
def get_config(self):
pass
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP32_8GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
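# Usage (a sketch; both arguments are optional and default to the paths below):
#   bash examples/SSD320_FP32_8GPU.sh [CKPT_DIR] [CONFIG_DIR] [extra model_main.py flags...]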
CKPT_DIR=${1:-"/results/SSD320_FP32_8GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_full_8gpus.config"
GPUS=8
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
mkdir -p $CKPT_DIR
time mpirun --allow-run-as-root \
-np $GPUS \
-H localhost:$GPUS \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=INFO \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 \
-mca btl ^openib \
python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
        --alsologtostderr \
"${@:3}" 2>&1 | tee $CKPT_DIR/train_log
|
TensorFlow2/Detection/Efficientdet/efficientnet | efficientnet | efficientnet_model | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from typing import Any, Dict, Optional, Text, Tuple
import copy
import tensorflow as tf
from efficientnet.layers import simple_swish, hard_swish, identity, gelu, get_activation
from efficientnet.blocks import conv2d_block, mb_conv_block
from efficientnet.common_modules import round_filters, round_repeats, load_weights
from model import dataloader
def build_dict(name, args=None):
if name == "ModelConfig":
return_dict = copy.deepcopy(ModelConfig)
elif name == "BlockConfig":
return_dict = copy.deepcopy(BlockConfig)
else:
raise ValueError("Name of requested dictionary not found!")
if args is None:
return return_dict
if isinstance(args, dict):
return_dict.update(args)
elif isinstance(args, tuple):
        return_dict.update({a: p for a, p in zip(return_dict.keys(), args)})
else:
raise ValueError("Expected tuple or dict!")
return return_dict
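# Example (a sketch): positional tuple args fill the requested config's keys
# in their declared order, so
#   build_dict("BlockConfig", args=(32, 16, 3, 1, 1, (1, 1), 0.25))
# returns a BlockConfig copy with input_filters=32, output_filters=16,
# kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25,
# and the remaining keys left at their defaults.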
# Config for a single MB Conv Block.
BlockConfig = {
'input_filters': 0,
'output_filters': 0,
'kernel_size': 3,
'num_repeat': 1,
'expand_ratio': 1,
'strides': (1, 1),
'se_ratio': None,
'id_skip': True,
'fused_conv': False,
'conv_type': 'depthwise'
}
# Default Config for Efficientnet-B0.
ModelConfig = {
'width_coefficient': 1.0,
'depth_coefficient': 1.0,
'resolution': 224,
'dropout_rate': 0.2,
'blocks': (
# (input_filters, output_filters, kernel_size, num_repeat,
# expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
build_dict(name="BlockConfig", args=(32, 16, 3, 1, 1, (1, 1), 0.25)),
build_dict(name="BlockConfig", args=(16, 24, 3, 2, 6, (2, 2), 0.25)),
build_dict(name="BlockConfig", args=(24, 40, 5, 2, 6, (2, 2), 0.25)),
build_dict(name="BlockConfig", args=(40, 80, 3, 3, 6, (2, 2), 0.25)),
build_dict(name="BlockConfig", args=(80, 112, 5, 3, 6, (1, 1), 0.25)),
build_dict(name="BlockConfig", args=(112, 192, 5, 4, 6, (2, 2), 0.25)),
build_dict(name="BlockConfig", args=(192, 320, 3, 1, 6, (1, 1), 0.25)),
# pylint: enable=bad-whitespace
),
'stem_base_filters': 32,
'top_base_filters': 1280,
'activation': 'swish',
'batch_norm': 'default',
'bn_momentum': 0.99,
'bn_epsilon': 1e-3,
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
'weight_decay': 5e-6,
'drop_connect_rate': 0.0,
'depth_divisor': 8,
'min_depth': None,
'use_se': True,
'input_channels': 3,
'num_classes': 1000,
'model_name': 'efficientnet',
'rescale_input': True,
'data_format': 'channels_last',
'dtype': 'float32',
'weight_init': 'fan_in',
}
MODEL_CONFIGS = {
# (width, depth, resolution, dropout)
'efficientnet-b0': build_dict(name="ModelConfig", args=(1.0, 1.0, 224, 0.2)),
'efficientnet-b1': build_dict(name="ModelConfig", args=(1.0, 1.1, 240, 0.2)),
'efficientnet-b2': build_dict(name="ModelConfig", args=(1.1, 1.2, 260, 0.3)),
'efficientnet-b3': build_dict(name="ModelConfig", args=(1.2, 1.4, 300, 0.3)),
'efficientnet-b4': build_dict(name="ModelConfig", args=(1.4, 1.8, 380, 0.4)),
'efficientnet-b5': build_dict(name="ModelConfig", args=(1.6, 2.2, 456, 0.4)),
'efficientnet-b6': build_dict(name="ModelConfig", args=(1.8, 2.6, 528, 0.5)),
'efficientnet-b7': build_dict(name="ModelConfig", args=(2.0, 3.1, 600, 0.5)),
'efficientnet-b8': build_dict(name="ModelConfig", args=(2.2, 3.6, 672, 0.5)),
'efficientnet-l2': build_dict(name="ModelConfig", args=(4.3, 5.3, 800, 0.5)),
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_in',
'distribution': 'uniform'
}
}
def efficientnet(image_input: tf.keras.layers.Input,
config: dict,
features_only: bool):
"""Creates an EfficientNet graph given the model parameters.
This function is wrapped by the `EfficientNet` class to make a tf.keras.Model.
Args:
image_input: the input batch of images
config: the model config
features_only: build only feature network
Returns:
the output of efficientnet
"""
depth_coefficient = config['depth_coefficient']
blocks = config['blocks']
stem_base_filters = config['stem_base_filters']
top_base_filters = config['top_base_filters']
activation = get_activation(config['activation'])
dropout_rate = config['dropout_rate']
drop_connect_rate = config['drop_connect_rate']
num_classes = config['num_classes']
input_channels = config['input_channels']
rescale_input = config['rescale_input']
data_format = tf.keras.backend.image_data_format()
dtype = config['dtype']
weight_decay = config['weight_decay']
weight_init = config['weight_init']
endpoints = {}
reduction_idx = 0
x = image_input
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
processor = dataloader.InputProcessor(image=x, output_size=x.shape)
processor.normalize_image(dtype=dtype)
x = processor.get_image()
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3],
strides=[2, 2],
activation=activation,
name='stem')
# Build blocks
num_blocks_total = sum(
round_repeats(block['num_repeat'], depth_coefficient) for block in blocks)
block_num = 0
for stack_idx, block in enumerate(blocks):
assert block['num_repeat'] > 0
is_reduction = False # reduction flag for blocks after the stem layer
# If the first block has super-pixel (space-to-depth) layer, then stem is
# the first reduction point.
    if block['strides'] == (2, 2) and stack_idx == 0:
reduction_idx += 1
endpoints['reduction_%s' % reduction_idx] = x
elif ((stack_idx == len(blocks) - 1) or
blocks[stack_idx + 1]['strides'][0] > 1):
is_reduction = True
reduction_idx += 1
# Update block input and output filters based on depth multiplier
block.update({
'input_filters':round_filters(block['input_filters'], config),
'output_filters':round_filters(block['output_filters'], config),
'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)})
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = mb_conv_block(x, block, config, block_prefix)
block_num += 1
if block['num_repeat'] > 1:
block.update({
'input_filters':block['output_filters'],
'strides':(1, 1)
})
for block_idx in range(block['num_repeat'] - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.update({'drop_connect_rate': drop_rate})
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = mb_conv_block(x, block, config, prefix=block_prefix)
block_num += 1
if is_reduction:
endpoints['reduction_%s' % reduction_idx] = x
# Build top
if not features_only:
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init
x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x)
return [x] + list(
filter(lambda endpoint: endpoint is not None, [
endpoints.get('reduction_1'),
endpoints.get('reduction_2'),
endpoints.get('reduction_3'),
endpoints.get('reduction_4'),
endpoints.get('reduction_5'),
]))
@tf.keras.utils.register_keras_serializable(package='Vision')
class EfficientNet(tf.keras.Model):
"""Wrapper class for an EfficientNet Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Dict[Text, Any] = None,
features_only: bool = None,
overrides: Dict[Text, Any] = None):
"""Create an EfficientNet model.
Args:
config: (optional) the main model parameters to create the model
features_only: (optional) build the base feature network only
overrides: (optional) a dict containing keys that can override
config
"""
overrides = overrides or {}
config = config or build_dict(name="ModelConfig")
self.config = config
self.config.update(overrides)
input_channels = self.config['input_channels']
model_name = self.config['model_name']
input_shape = (None, None, input_channels) # Should handle any size image
image_input = tf.keras.layers.Input(shape=input_shape)
output = efficientnet(image_input, self.config, features_only)
# Cast to float32 in case we have a different model dtype
# output = tf.cast(output, tf.float32)
super(EfficientNet, self).__init__(
inputs=image_input, outputs=output, name=model_name)
@classmethod
def from_name(cls,
model_name: Text,
features_only: bool = None,
model_weights_path: Text = None,
weights_format: Text = 'saved_model',
overrides: Dict[Text, Any] = None):
"""Construct an EfficientNet model from a predefined model name.
E.g., `EfficientNet.from_name('efficientnet-b0')`.
Args:
model_name: the predefined model name
features_only: (optional) build the base feature network only
model_weights_path: the path to the weights (h5 file or saved model dir)
weights_format: the model weights format. One of 'saved_model', 'h5',
or 'checkpoint'.
overrides: (optional) a dict containing keys that can override config
Returns:
A constructed EfficientNet instance.
"""
model_configs = dict(MODEL_CONFIGS)
overrides = dict(overrides) if overrides else {}
# One can define their own custom models if necessary
model_configs.update(overrides.pop('model_config', {}))
if model_name not in model_configs:
raise ValueError('Unknown model name {}'.format(model_name))
config = model_configs[model_name]
model = cls(config=config, overrides=overrides, features_only=features_only)
if model_weights_path:
if weights_format == 'checkpoint' and tf.io.gfile.isdir(model_weights_path):
model_weights_path = tf.train.latest_checkpoint(model_weights_path)
load_weights(model, model_weights_path, weights_format=weights_format)
return model
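# Usage sketch (the model name is one of the MODEL_CONFIGS keys above; the
# model returns a list of [logits, reduction_1..reduction_5] endpoints):
#
#   model = EfficientNet.from_name('efficientnet-b0')
#   probs = model(tf.zeros([1, 224, 224, 3]), training=False)[0]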
|
PyTorch/SpeechSynthesis/FastPitch/notebooks | notebooks | FastPitch_voice_modification_custom | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Copyright 2020 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# # FastPitch: Voice Modification with Custom Transformations
# ## Model overview
# The [FastPitch](https://arxiv.org/abs/2006.06873) model is based on the [FastSpeech](https://arxiv.org/abs/1905.09263) model. Similarly to [FastSpeech2](https://arxiv.org/abs/2006.04558), which was developed concurrently, it learns to predict the pitch contour and conditions the generation on that contour.
#
# The simple mechanism of predicting the pitch at grapheme level (rather than at frame level, as FastSpeech2 does) makes it easy to alter the pitch during synthesis. FastPitch can thus change the perceived emotional state of the speaker, or slightly emphasise certain lexical units.
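# As a minimal sketch (not part of the original notebook), a pitch transform is
# just a function of the predicted per-grapheme pitch tensor; for example,
# flattening the contour toward its mean could look like:
#
#     def pitch_transform_flatten(pitch, pitch_lens, amount=0.5):
#         return pitch * (1.0 - amount) + pitch.mean() * amount
#
# The sections below build similar transforms and pass them to `inference.py`.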
# ## Requirements
# Run the notebook inside the container. By default the container forwards port `8888`.
# ```
# bash scripts/docker/interactive.sh
#
# # inside the container
# cd notebooks
# jupyter notebook --ip='*' --port=8888
# ```
# Please refer to the Requirements section in `README.md` for more details, including how to run outside the container.
# In[ ]:
import os
assert os.getcwd().split('/')[-1] == 'notebooks'
# ## Generate audio samples
# Training a FastPitch model from scratch takes 3 to 27 hours, depending on the type and number of GPUs; performance numbers can be found in the "Training performance results" section of `README.md`. Therefore, to save time when running this notebook, we recommend downloading the pretrained FastPitch checkpoints from NGC for inference.
#
# You can find the FP32 checkpoint at [NGC](https://ngc.nvidia.com/catalog/models/nvidia:fastpitch_pyt_fp32_ckpt_v1/files), and the AMP (Automatic Mixed Precision) checkpoint at [NGC](https://ngc.nvidia.com/catalog/models/nvidia:fastpitch_pyt_amp_ckpt_v1/files).
#
# To synthesize audio, you will need a WaveGlow model, which generates waveforms based on the mel-spectrograms produced by FastPitch. You can download a pre-trained WaveGlow AMP model from [NGC](https://ngc.nvidia.com/catalog/models/nvidia:waveglow256pyt_fp16).
# In[ ]:
get_ipython().system(' mkdir -p output')
# Download grapheme-level model which will be easier to manipulate
get_ipython().system(' MODEL_ZIP="nvidia_fastpitch_200518.zip" MODEL="nvidia_fastpitch_200518.pt" MODEL_URL="https://api.ngc.nvidia.com/v2/models/nvidia/fastpitch_pyt_amp_ckpt_v1/versions/20.02.0/zip" MODEL_DIR=\'../pretrained_models/fastpitch\' ../scripts/download_fastpitch.sh')
get_ipython().system(" MODEL_DIR='../pretrained_models/waveglow' ../scripts/download_waveglow.sh")
# You can perform inference using the respective checkpoints that are passed as the `--fastpitch` and `--waveglow` arguments. Next, you will use the FastPitch model to generate audio samples for input text, including the basic version and variations in pace, fade-out, and pitch transforms.
# In[ ]:
import IPython
# store paths in aux variables
fastp = '../pretrained_models/fastpitch/nvidia_fastpitch_200518.pt'
waveg = '../pretrained_models/waveglow/nvidia_waveglow256pyt_fp16.pt'
flags = f'--cuda --fastpitch {fastp} --waveglow {waveg} --wn-channels 256 --p-arpabet 0.0'
# ### 1. Basic speech synthesis
# You need to create an input file with some text, or just enter the text in the cell below:
# In[ ]:
get_ipython().run_cell_magic('writefile', 'text.txt', 'This is a sample sentence you can synthesize using this wonderful model!\n')
# In[ ]:
# Basic synthesis
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/original --pace 0.75 > /dev/null')
IPython.display.Audio("output/original/audio_0.wav")
# ### 2. 'Low - high, odd - even' speech transformation
# In[ ]:
get_ipython().run_cell_magic('writefile', '../fastpitch/pitch_transform.py', 'import torch\nimport numpy as np\n\ndef pitch_transform_custom(pitch, pitch_lens):\n    """Apply a custom pitch transformation to predicted pitch values.\n\n    Odd - even sentence transformation.\n    This sample modification decreases the pitch for even words\n    and increases the pitch for odd words in the sentence.\n\n    PARAMS\n    ------\n    pitch: torch.Tensor (bs, max_len)\n        Predicted pitch values for each lexical unit, padded to max_len (in Hz).\n    pitch_lens: torch.Tensor (bs, max_len)\n        Number of lexical units in each utterance.\n\n    RETURNS\n    -------\n    pitch: torch.Tensor\n        Modified pitch (in Hz).\n    """\n\n    sentence = \'This is a sample sentence you can synthesize using this wonderful model!\'\n    sep_sums = np.cumsum(np.asarray([c == \' \' for c in sentence]))\n    transform = np.where(sep_sums % 2 == 0, 0.6, 1.2)\n    transform = torch.tensor(transform, dtype=torch.float32, device=pitch.device)\n\n    return pitch * transform\n')
# In[ ]:
# Synthesis with pace 0.75 and odd-even sentence transformation
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/custom --pitch-transform-custom --pace 0.75 > /dev/null')
IPython.display.Audio("output/custom/audio_0.wav")
# ### 3. 'Really' speech transformation
# In[ ]:
get_ipython().run_cell_magic('writefile', 'text.txt', 'Really? It sounds nothing like that.\n')
# In[ ]:
# Basic synthesis
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/original_really > /dev/null')
IPython.display.Audio("output/original_really/audio_0.wav")
# In[ ]:
get_ipython().run_cell_magic('writefile', '../fastpitch/pitch_transform.py', 'import torch\n\ndef pitch_transform_custom(pitch, pitch_lens):\n \n sentence = "Really? I wouldn\'t be so sure."\n \n # Put emphasis on `lly?` in \'Really?\'\n for i in range(len(\'Rea\'), len(\'Really?\')):\n pitch[0][0, i] = 280 + (i - 3) * 20\n\n return pitch\n')
# In[ ]:
# Synthesis with 'really' question transformation and pace 0.9
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/custom_really_question --pitch-transform-custom --pace 0.9 > /dev/null')
IPython.display.Audio("output/custom_really_question/audio_0.wav")
# In[ ]:
get_ipython().run_cell_magic('writefile', '../fastpitch/pitch_transform.py', "import torch\n\ndef pitch_transform_custom(pitch, pitch_lens):\n \n sentence = 'Really? It does not sound like that!'\n \n # Fixed 'really' word adjustment\n for i in range(len('Really?')):\n pitch[0][0, i] = 215 - i * 10\n\n return pitch * torch.tensor(0.8)\n")
# In[ ]:
# Synthesis with 'really' sceptical transformation and pace 0.9
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/custom_really_sceptical --pitch-transform-custom --pace 0.9 > /dev/null')
IPython.display.Audio("output/custom_really_sceptical/audio_0.wav")
# ### 4. 'Right' speech transformation
# In[ ]:
get_ipython().run_cell_magic('writefile', 'text.txt', "It's obvious... right?\n")
# In[ ]:
# Basic synthesis
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/original_right > /dev/null')
IPython.display.Audio("output/original_right/audio_0.wav")
# In[ ]:
get_ipython().run_cell_magic('writefile', '../fastpitch/pitch_transform.py', 'import torch\n\ndef pitch_transform_custom(pitch, pitch_lens):\n \n pitch[0][0, -6] = 180 # R\n pitch[0][0, -5] = 260 # i\n pitch[0][0, -4] = 360 # g\n pitch[0][0, -3] = 360 # h\n pitch[0][0, -2] = 380 # t\n pitch[0][0, -1] = 400 # ?\n\n return pitch * torch.tensor(0.9)\n')
# In[ ]:
# Synthesis with 'right' question transformation
get_ipython().system('python ../inference.py {flags} -i text.txt -o output/custom_right_question --pitch-transform-custom > /dev/null')
IPython.display.Audio("output/custom_right_question/audio_0.wav")
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils | utils | log_uniform_sampler | import numpy as np
import torch
from torch import nn
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
log_indices = torch.arange(1., range_max+2., 1.).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum('ijk,ijk->ij',
true_w, inputs) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
sample_w, inputs) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
S, B = 3, 4
n_vocab = 10000
n_sample = 5
H = 32
labels = torch.LongTensor(S, B).random_(0, n_vocab)
# sampler = LogUniformSampler(n_vocab, unique=False)
# new_labels, sample, sample_prob = sampler.sample(n_sample, labels)
    sampler = LogUniformSampler(n_vocab, n_sample)
# true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)
# print('true_probs', true_probs.numpy().tolist())
# print('samp_probs', samp_probs.numpy().tolist())
# print('neg_samples', neg_samples.numpy().tolist())
# print('sum', torch.sum(sampler.dist).item())
# assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()
embedding = nn.Embedding(n_vocab, H)
bias = torch.zeros(n_vocab)
inputs = torch.Tensor(S, B, H).normal_()
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs | configs | e2e_faster_rcnn_R_50_C4_1x | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"
RPN:
PRE_NMS_TOP_N_TEST: 6000
POST_NMS_TOP_N_TEST: 1000
DATASETS:
TRAIN: ("coco_2014_train", "coco_2014_valminusminival")
TEST: ("coco_2014_minival",)
SOLVER:
BASE_LR: 0.01
WEIGHT_DECAY: 0.0001
STEPS: (120000, 160000)
MAX_ITER: 180000
IMS_PER_BATCH: 8
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50 | resnet50 | conv2d_block | import tensorflow as tf
class Conv2DBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, strides, padding='SAME',
use_batch_norm=True, use_relu=True, trainable=True,
trainable_batch_norm=False, *args, **kwargs):
super().__init__(trainable=trainable, *args, **kwargs)
self.conv2d = None
self.batch_norm = None
self.relu = None
self.conv2d = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=not use_batch_norm,
trainable=trainable
)
if use_batch_norm:
self.batch_norm = tf.keras.layers.BatchNormalization(
momentum=0.9,
scale=True,
epsilon=1e-05,
trainable=trainable and trainable_batch_norm,
fused=True,
center=True
)
if use_relu:
self.relu = tf.keras.layers.ReLU()
def call(self, inputs, training=None, **kwargs):
net = inputs
net = self.conv2d(net)
if self.batch_norm:
net = self.batch_norm(net, training=training)
if self.relu:
net = self.relu(net)
return net
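if __name__ == '__main__':
    # Smoke test (a sketch, not part of the library): run the block on a dummy
    # NHWC batch; with SAME padding and stride 2 the spatial dims halve.
    block = Conv2DBlock(filters=64, kernel_size=7, strides=2)
    out = block(tf.zeros([1, 224, 224, 3]), training=False)
    print(out.shape)  # expected: (1, 112, 112, 64)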
|
TensorFlow2/Detection/Efficientdet/visualize | visualize | standard_fields | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors. This
should be used by the decoder to identify keys for the returned tensor_dict
containing input tensors. And it should be used by the model to identify the
tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
original_image_spatial_shape: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_image_confidences: image-level class confidences.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_confidences: box-level class confidences. The shape should be
the same as the shape of groundtruth_classes.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_keypoint_weights: groundtruth weight factor for keypoints.
groundtruth_label_weights: groundtruth label weights.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
is_annotated: whether an image has been labeled or not.
    true_image_shape: true shape of each image in the resized batch, since
      resized images can be padded with zeros.
multiclass_scores: the label score per class for each box.
context_features: a flattened list of contextual features.
context_feature_length: the fixed length of each feature in
context_features, used for reshaping.
valid_context_size: the valid context size, used in filtering the padded
context features.
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
original_image_spatial_shape = 'original_image_spatial_shape'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_image_confidences = 'groundtruth_image_confidences'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_confidences = 'groundtruth_confidences'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_keypoint_weights = 'groundtruth_keypoint_weights'
groundtruth_label_weights = 'groundtruth_label_weights'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
is_annotated = 'is_annotated'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
context_features = 'context_features'
context_feature_length = 'context_feature_length'
valid_context_size = 'valid_context_size'
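# Usage sketch: the attributes above are plain strings meant to be used as
# tensor_dict keys, e.g.
#
#   image = tensor_dict[InputDataFields.image]
#   boxes = tensor_dict[InputDataFields.groundtruth_boxes]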
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_multiclass_scores: class score distribution (including background)
for detection boxes in the image including background class.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
detection_keypoint_scores: contains detection keypoint scores.
num_detections: number of detections in the batch.
raw_detection_boxes: contains decoded detection boxes without Non-Max
suppression.
raw_detection_scores: contains class score logits for raw detection boxes.
detection_anchor_indices: The anchor indices of the detections after NMS.
detection_features: contains extracted features for each detected box
after NMS.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_multiclass_scores = 'detection_multiclass_scores'
detection_features = 'detection_features'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
detection_keypoint_scores = 'detection_keypoint_scores'
num_detections = 'num_detections'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_scores = 'raw_detection_scores'
detection_anchor_indices = 'detection_anchor_indices'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
is_crowd: is_crowd annotation per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
confidences = 'confidences'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
is_crowd = 'is_crowd'
class PredictionFields(object):
"""Naming conventions for standardized prediction outputs.
Attributes:
feature_maps: List of feature maps for prediction.
anchors: Generated anchors.
raw_detection_boxes: Decoded detection boxes without NMS.
raw_detection_feature_map_indices: Feature map indices from which each raw
detection box was produced.
"""
feature_maps = 'feature_maps'
anchors = 'anchors'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_feature_map_indices = 'raw_detection_feature_map_indices'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton | triton | metrics | import os
import pandas as pd
import numpy as np
import pickle
import torch
from criterions import QuantileLoss
from triton.deployment_toolkit.core import BaseMetricsCalculator
def update_argparser(parser):
parser.add_argument("--dataset", type=str, help="Path to dataset to be used", required=True)
parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)
def _unscale_per_id(config, values, ids, scalers):
# values = values.cpu().numpy()
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
flat_values['id'] = ids
df_list = []
for idx, group in flat_values.groupby('id'):
scaler = scalers[idx]
group_copy = group.copy()
for col in group_copy.columns:
            if 'id' not in col:
_col = np.expand_dims(group_copy[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
group_copy[col] = _t_col
df_list.append(group_copy)
flat_values = pd.concat(df_list, axis=0)
    flat_values = flat_values[[col for col in flat_values if 'id' not in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
def _unscale(config, values, scaler):
# values = values.cpu().numpy()
num_horizons = config.example_length - config.encoder_length + 1
flat_values = pd.DataFrame(
values,
columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
)
for col in flat_values.columns:
        if 'id' not in col:
_col = np.expand_dims(flat_values[col].values, -1)
_t_col = scaler.inverse_transform(_col)[:,-1]
flat_values[col] = _t_col
    flat_values = flat_values[[col for col in flat_values if 'id' not in col]]
flat_tensor = torch.from_numpy(flat_values.values)
return flat_tensor
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, dataset, checkpoint):
state_dict = torch.load(os.path.join(checkpoint, "checkpoint.pt"))
self.config = state_dict['config']
self.predictions = []
self.targets = []
self.ids = []
        with open(os.path.join(dataset, 'tgt_scalers.bin'), 'rb') as f:
            self.scalers = pickle.load(f)
@property
def metrics(self):
targets = np.concatenate(self.targets, axis=0)
# targets = torch.cat(self.targets, dim=0)
predictions = np.concatenate(self.predictions, axis=0)
# predictions = torch.cat(self.predictions, dim=0)
ids = np.concatenate(self.ids, axis=0)
if self.config.scale_per_id:
unscaled_predictions = torch.stack(
[_unscale_per_id(self.config, predictions[:,:,i], ids, self.scalers) for i in range(len(self.config.quantiles))],
dim=-1)
unscaled_targets = _unscale_per_id(self.config, targets[:,:,0], ids, self.scalers).unsqueeze(-1)
else:
ids = None
unscaled_predictions = torch.stack(
[_unscale(self.config, predictions[:,:,i], self.scalers['']) for i in range(len(self.config.quantiles))],
dim=-1)
unscaled_targets = _unscale(self.config, targets[:,:,0], self.scalers['']).unsqueeze(-1)
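        # Normalized quantile risk (q-risk) as in the TFT paper:
        #   q_risk_q = 2 * sum_t QL_q(y_t, yhat_t) / sum_t |y_t|
        # where QL_q is the pinball loss at quantile q (here 0.1, 0.5, 0.9).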
losses = QuantileLoss(self.config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
q_risk = 2 * losses / normalizer
return {'test_p10': q_risk[0].cpu().numpy(), 'test_p50': q_risk[1].cpu().numpy(), 'test_p90': q_risk[2].cpu().numpy()}
def update(
self,
ids,
y_pred,
x,
y_real,
):
        # Can probably just pass all of this to the evaluator main class.
self.predictions.append(y_pred["target__0"])
self.targets.append(y_real['target__0'][:,:,0][:,:,np.newaxis])
self.ids.append(ids)
# return self.metrics
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/training/TF32 | TF32 | convergence_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
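# Trains EfficientNet-B4 with TF32 on 8x A100-80GB; the effective global batch
# size is 8 GPUs x 80 samples = 640 per training step.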
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b4_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 500 \
--save_checkpoint_freq 5 \
--train_batch_size 80 \
--eval_batch_size 80 \
--train_img_size 380 \
--eval_img_size 380 \
--augmenter_name autoaugment \
--mixup_alpha 0.2 \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
TensorFlow/Detection/SSD/models/research/slim/nets | nets | alexnet | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.
This work was first described in:
ImageNet Classification with Deep Convolutional Neural Networks
Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton
and later refined in:
One weird trick for parallelizing convolutional neural networks
Alex Krizhevsky, 2014
Here we provide the implementation proposed in "One weird trick" rather than
the one in "ImageNet Classification"; as per the paper, the LRN layers have
been removed.
Usage:
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
outputs, end_points = alexnet.alexnet_v2(inputs)
@@alexnet_v2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def alexnet_v2_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
biases_initializer=tf.constant_initializer(0.1),
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def alexnet_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='alexnet_v2',
global_pool=False):
"""AlexNet version 2.
Described in: http://arxiv.org/pdf/1404.5997v2.pdf
Parameters from:
github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
layers-imagenet-1gpu.cfg
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224 or set
global_pool=True. To use in fully convolutional mode, set
spatial_squeeze to false.
The LRN layers have been removed and the initializers have been changed from
random_normal_initializer to xavier_initializer.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: the number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      logits. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original AlexNet.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the non-dropped-out input to the logits layer (if num_classes is 0
or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=[end_points_collection]):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
scope='conv1')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
net = slim.conv2d(net, 192, [5, 5], scope='conv2')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
net = slim.conv2d(net, 384, [3, 3], scope='conv3')
net = slim.conv2d(net, 384, [3, 3], scope='conv4')
net = slim.conv2d(net, 256, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')
# Use conv2d instead of fully_connected layers.
with slim.arg_scope([slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_point dict.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
alexnet_v2.default_image_size = 224
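# A minimal graph-construction sketch (illustrative, not part of the original
# module); it only builds the network and does not run a session.
if __name__ == '__main__':
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with slim.arg_scope(alexnet_v2_arg_scope()):
    logits, end_points = alexnet_v2(images, num_classes=1000, is_training=False)
  print(logits)  # a [?, 1000] tensor after the spatial squeeze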
|
PyTorch/Classification/GPUNet/triton/125ms-D/runner | runner | start_NVIDIA-DGX-1-(1x-V100-32GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.125ms-D.runner.__main__" \
--config-path "triton/125ms-D/runner/config_NVIDIA-DGX-1-(1x-V100-32GB).yaml" \
--device 0 |
PyTorch/SpeechRecognition/Jasper/common | common | features | import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
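        # round the length up to a multiple of pad_align, e.g. pad_align=8 and
        # x_size=100 give pad_amt=4 and padded_len=104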
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
            for _ in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
# print ("normalize_batch: x, seq_len, shapes: ", x.shape, seq_len, seq_len.shape)
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
@torch.jit.script
def stack_subsample_frames(x, x_lens, stacking: int = 1, subsampling: int = 1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
seq = [x]
for n in range(1, stacking):
tmp = torch.zeros_like(x)
tmp[:, :, :-n] = x[:, :, n:]
seq.append(tmp)
x = torch.cat(seq, dim=1)[:, :, ::subsampling]
if subsampling > 1:
x_lens = torch.ceil(x_lens.float() / subsampling).int()
if x.size(2) > x_lens.max().item():
assert abs(x.size(2) - x_lens.max().item()) <= 1
x = x[:,:,:x_lens.max().item()]
return x, x_lens
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_splicing", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=8000, window_size=0.02, window_stride=0.01,
window="hamming", normalize="per_feature", n_fft=None,
preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=8, pad_to_max_duration=False,
max_duration=float('inf'), frame_splicing=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_splicing = frame_splicing
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sr=sample_rate, n_fft=self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, seq_len):
dtype = x.dtype
seq_len = self.get_seq_len(seq_len)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
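        # y[0] = x[0]; y[n] = x[n] - preemph * x[n-1] (first-order high-pass)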
if self.preemph is not None:
x = torch.cat(
(x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# frame splicing if required
if self.frame_splicing > 1:
raise ValueError('Frame splicing not supported')
# normalize if required
x = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=seq_len.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), seq_len
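# A minimal usage sketch (illustrative values, not part of the original module).
if __name__ == '__main__':
    feat_proc = FilterbankFeatures(sample_rate=16000, window_size=0.02,
                                   window_stride=0.01, n_filt=64)
    audio = torch.randn(2, 16000)              # two one-second waveforms
    audio_lens = torch.tensor([16000, 12000])  # valid lengths in samples
    feats, feat_lens = feat_proc(audio, audio_lens)
    print(feats.shape, feat_lens)              # (2, 64, padded_num_frames)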
|
PyTorch/LanguageModeling/BART/configs | configs | config_hf | {
"_num_labels": 3,
"activation_dropout": 0.0,
"activation_function": "gelu",
"add_final_layer_norm": false,
"attention_dropout": 0.0,
"bos_token_id": 0,
"classif_dropout": 0.0,
"d_model": 1024,
"decoder_attention_heads": 16,
"decoder_ffn_dim": 4096,
"decoder_layerdrop": 0.0,
"decoder_layers": 12,
"decoder_start_token_id": 2,
"dropout": 0.1,
"early_stopping": true,
"encoder_attention_heads": 16,
"encoder_ffn_dim": 4096,
"encoder_layerdrop": 0.0,
"encoder_layers": 12,
"eos_token_id": 2,
"force_bos_token_to_be_generated": true,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"init_std": 0.02,
"is_encoder_decoder": true,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"length_penalty": 2.0,
"max_length": 142,
"max_position_embeddings": 1024,
"min_length": 56,
"model_type": "bart",
"no_repeat_ngram_size": 3,
"normalize_before": false,
"num_beams": 4,
"num_hidden_layers": 12,
"output_past": true,
"pad_token_id": 1,
"prefix": " ",
"scale_embedding": false,
"task_specific_params": {
"summarization": {
"early_stopping": true,
"length_penalty": 2.0,
"max_length": 142,
"min_length": 56,
"no_repeat_ngram_size": 3,
"num_beams": 4
}
},
"vocab_size": 50264
}
|