relative_path | section | filename | text
---|---|---|---|
PyTorch/Classification/GPUNet | GPUNet | train | #!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import glob
import re
from pathlib import Path
import time
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import dllogger
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import yaml
from timm.data import (
AugMixDataset,
FastCollateMixup,
Mixup,
create_dataset,
create_loader,
resolve_data_config,
)
from timm.loss import (
JsdCrossEntropy,
LabelSmoothingCrossEntropy,
SoftTargetCrossEntropy,
)
from timm.models import (
convert_splitbn_model,
create_model,
load_checkpoint,
model_parameters,
resume_checkpoint,
safe_model_name,
)
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import *
from timm.utils import ApexScaler, NativeScaler
from torch.nn.parallel import DistributedDataParallel as NativeDDP
def cross_entropy_loss_with_soft_target(pred, soft_target):
    """Soft-target cross entropy used as the knowledge-distillation loss."""
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1))
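# Note (added for clarity): for a teacher distribution q (soft_target) and
# student logits z (pred), the helper above computes
# mean_over_batch( -sum_i q_i * log_softmax(z)_i ),
# i.e. the cross entropy against the teacher's softened predictions. When
# --enable-distill is set, train_one_epoch() adds this term to the ordinary
# hard-label loss.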
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, "autocast") is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger("train")
# Helper so argparse can accept Boolean values (yes/no, true/false, 1/0) in add_argument
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
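# Illustrative usage (added comment): str2bool backs the --enable-distill and
# --test-teacher flags below, so e.g. `--enable-distill yes` or
# `--enable-distill 0` both parse cleanly.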
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False
)
parser.add_argument(
"-c",
"--config",
default="",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments",
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# DLlogger
parser.add_argument(
"--dllogger-name", default="/logs/log.json", type=str, help="name of dllogger file"
)
# Dataset / Model parameters
parser.add_argument("data_dir", metavar="DIR", help="path to dataset")
parser.add_argument(
"--dataset",
"-d",
metavar="NAME",
default="",
help="dataset type (default: ImageFolder/ImageTar if empty)",
)
parser.add_argument(
"--train-split",
metavar="NAME",
default="train",
help="dataset train split (default: train)",
)
parser.add_argument(
"--val-split",
metavar="NAME",
default="validation",
help="dataset validation split (default: validation)",
)
parser.add_argument(
"--model",
default="resnet101",
type=str,
metavar="MODEL",
help='Name of model to train (default: "countception"',
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="Start with pretrained version of specified network (if avail)",
)
parser.add_argument(
"--initial-checkpoint",
default="",
type=str,
metavar="PATH",
help="Initialize model from this checkpoint (default: none)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="Resume full model and optimizer state from checkpoint (default: none)",
)
parser.add_argument(
"--no-resume-opt",
action="store_true",
default=False,
help="prevent resume of optimizer state when resuming model",
)
parser.add_argument(
"--num-classes",
type=int,
default=None,
metavar="N",
help="number of label classes (Model default if None)",
)
parser.add_argument(
"--gp",
default=None,
type=str,
metavar="POOL",
help="Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
metavar="N",
help="Image patch size (default: None => model default)",
)
parser.add_argument(
"--input-size",
default=None,
nargs=3,
type=int,
metavar="N N N",
help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty",
)
parser.add_argument(
"--crop-pct",
default=None,
type=float,
metavar="N",
help="Input image center crop percent (for validation only)",
)
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=None,
metavar="MEAN",
help="Override mean pixel value of dataset",
)
parser.add_argument(
"--std",
type=float,
nargs="+",
default=None,
metavar="STD",
help="Override std deviation of of dataset",
)
parser.add_argument(
"--interpolation",
default="",
type=str,
metavar="NAME",
help="Image resize interpolation type (overrides model)",
)
parser.add_argument(
"-b",
"--batch-size",
type=int,
default=32,
metavar="N",
help="input batch size for training (default: 32)",
)
parser.add_argument(
"-vb",
"--validation-batch-size-multiplier",
type=int,
default=1,
metavar="N",
help="ratio of validation batch size to training batch size (default: 1)",
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="sgd",
type=str,
metavar="OPTIMIZER",
help='Optimizer (default: "sgd"',
)
parser.add_argument(
"--opt-eps",
default=None,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: None, use opt default)",
)
parser.add_argument(
"--opt-betas",
default=None,
type=float,
nargs="+",
metavar="BETA",
help="Optimizer Betas (default: None, use opt default)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="Optimizer momentum (default: 0.9)",
)
parser.add_argument(
"--weight-decay", type=float, default=0.0001, help="weight decay (default: 0.0001)"
)
parser.add_argument(
"--clip-grad",
type=float,
default=None,
metavar="NORM",
help="Clip gradient norm (default: None, no clipping)",
)
parser.add_argument(
"--clip-mode",
type=str,
default="norm",
help='Gradient clipping mode. One of ("norm", "value", "agc")',
)
# Learning rate schedule parameters
parser.add_argument(
"--sched",
default="step",
type=str,
metavar="SCHEDULER",
help='LR scheduler (default: "step"',
)
parser.add_argument(
"--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)"
)
parser.add_argument(
"--lr-noise",
type=float,
nargs="+",
default=None,
metavar="pct, pct",
help="learning rate noise on/off epoch percentages",
)
parser.add_argument(
"--lr-noise-pct",
type=float,
default=0.67,
metavar="PERCENT",
help="learning rate noise limit percent (default: 0.67)",
)
parser.add_argument(
"--lr-noise-std",
type=float,
default=1.0,
metavar="STDDEV",
help="learning rate noise std-dev (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-mul",
type=float,
default=1.0,
metavar="MULT",
help="learning rate cycle len multiplier (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-limit",
type=int,
default=1,
metavar="N",
help="learning rate cycle limit",
)
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0001,
metavar="LR",
help="warmup learning rate (default: 0.0001)",
)
parser.add_argument(
"--min-lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 2)",
)
parser.add_argument(
"--epoch-repeats",
type=float,
default=0.0,
metavar="N",
help="epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).",
)
parser.add_argument(
"--start-epoch",
default=None,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"--benchmark-steps",
default=None,
type=int,
metavar="N",
help="For benchmarking, run this number of steps per epoch instead of all.",
)
parser.add_argument(
"--decay-epochs",
type=float,
default=30,
metavar="N",
help="epoch interval to decay LR",
)
parser.add_argument(
"--warmup-epochs",
type=int,
default=3,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
parser.add_argument(
"--cooldown-epochs",
type=int,
default=10,
metavar="N",
help="epochs to cooldown LR at min_lr, after cyclic schedule ends",
)
parser.add_argument(
"--patience-epochs",
type=int,
default=10,
metavar="N",
help="patience epochs for Plateau LR scheduler (default: 10",
)
parser.add_argument(
"--decay-rate",
"--dr",
type=float,
default=0.1,
metavar="RATE",
help="LR decay rate (default: 0.1)",
)
# Augmentation & regularization parameters
parser.add_argument(
"--no-aug",
action="store_true",
default=False,
help="Disable all training augmentation, override other train aug args",
)
parser.add_argument(
"--scale",
type=float,
nargs="+",
default=[0.08, 1.0],
metavar="PCT",
help="Random resize scale (default: 0.08 1.0)",
)
parser.add_argument(
"--ratio",
type=float,
nargs="+",
default=[3.0 / 4.0, 4.0 / 3.0],
metavar="RATIO",
help="Random resize aspect ratio (default: 0.75 1.33)",
)
parser.add_argument(
"--hflip", type=float, default=0.5, help="Horizontal flip training aug probability"
)
parser.add_argument(
"--vflip", type=float, default=0.0, help="Vertical flip training aug probability"
)
parser.add_argument(
"--color-jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)",
)
parser.add_argument(
"--aa",
type=str,
default=None,
metavar="NAME",
help='Use AutoAugment policy. "v0" or "original". (default: None)',
)
parser.add_argument(
"--aug-splits",
type=int,
default=0,
help="Number of augmentation splits (default: 0, valid: 0 or >=2)",
)
parser.add_argument(
"--jsd",
action="store_true",
default=False,
help="Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.",
)
parser.add_argument(
"--reprob",
type=float,
default=0.0,
metavar="PCT",
help="Random erase prob (default: 0.)",
)
parser.add_argument(
"--remode", type=str, default="const", help='Random erase mode (default: "const")'
)
parser.add_argument(
"--recount", type=int, default=1, help="Random erase count (default: 1)"
)
parser.add_argument(
"--resplit",
action="store_true",
default=False,
help="Do not random erase first (clean) augmentation split",
)
parser.add_argument(
"--mixup",
type=float,
default=0.0,
help="mixup alpha, mixup enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix",
type=float,
default=0.0,
help="cutmix alpha, cutmix enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix-minmax",
type=float,
nargs="+",
default=None,
help="cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)",
)
parser.add_argument(
"--mixup-prob",
type=float,
default=1.0,
help="Probability of performing mixup or cutmix when either/both is enabled",
)
parser.add_argument(
"--mixup-switch-prob",
type=float,
default=0.5,
help="Probability of switching to cutmix when both mixup and cutmix enabled",
)
parser.add_argument(
"--mixup-mode",
type=str,
default="batch",
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"',
)
parser.add_argument(
"--mixup-off-epoch",
default=0,
type=int,
metavar="N",
help="Turn off mixup after this epoch, disabled if 0 (default: 0)",
)
parser.add_argument(
"--smoothing", type=float, default=0.1, help="Label smoothing (default: 0.1)"
)
parser.add_argument(
"--train-interpolation",
type=str,
default="random",
    help='Training interpolation (random, bilinear, bicubic; default: "random")',
)
parser.add_argument(
"--drop", type=float, default=0.0, metavar="PCT", help="Dropout rate (default: 0.)"
)
parser.add_argument(
"--drop-connect",
type=float,
default=None,
metavar="PCT",
help="Drop connect rate, DEPRECATED, use drop-path (default: None)",
)
parser.add_argument(
"--drop-path",
type=float,
default=None,
metavar="PCT",
help="Drop path rate (default: None)",
)
parser.add_argument(
"--drop-block",
type=float,
default=None,
metavar="PCT",
help="Drop block rate (default: None)",
)
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument(
"--bn-tf",
action="store_true",
default=False,
help="Use Tensorflow BatchNorm defaults for models that support it (default: False)",
)
parser.add_argument(
"--bn-momentum",
type=float,
default=None,
help="BatchNorm momentum override (if not None)",
)
parser.add_argument(
"--bn-eps",
type=float,
default=None,
help="BatchNorm epsilon override (if not None)",
)
parser.add_argument(
"--sync-bn",
action="store_true",
help="Enable NVIDIA Apex or Torch synchronized BatchNorm.",
)
parser.add_argument(
"--dist-bn",
type=str,
default="",
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")',
)
parser.add_argument(
"--split-bn",
action="store_true",
help="Enable separate BN layers per augmentation split.",
)
# Model Exponential Moving Average
parser.add_argument(
"--model-ema",
action="store_true",
default=False,
help="Enable tracking moving average of model weights",
)
parser.add_argument(
"--model-ema-force-cpu",
action="store_true",
default=False,
help="Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.",
)
parser.add_argument(
"--model-ema-decay",
type=float,
default=0.9998,
help="decay factor for model weights moving average (default: 0.9998)",
)
# Misc
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=50,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--recovery-interval",
type=int,
default=0,
metavar="N",
help="how many batches to wait before writing recovery checkpoint",
)
parser.add_argument(
"--checkpoint-hist",
type=int,
default=10,
metavar="N",
help="number of checkpoints to keep (default: 10)",
)
parser.add_argument(
"-j",
"--workers",
type=int,
default=2,
metavar="N",
help="how many training processes to use (default: 1)",
)
parser.add_argument(
"--save-images",
action="store_true",
default=False,
help="save images of input bathes every log interval for debugging",
)
parser.add_argument(
"--amp",
action="store_true",
default=False,
help="use NVIDIA Apex AMP or Native AMP for mixed precision training",
)
parser.add_argument(
"--apex-amp",
action="store_true",
default=False,
help="Use NVIDIA Apex AMP mixed precision",
)
parser.add_argument(
"--native-amp",
action="store_true",
default=False,
help="Use Native Torch AMP mixed precision",
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="Use channels_last memory layout",
)
parser.add_argument(
"--pin-mem",
action="store_true",
default=False,
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument(
"--no-prefetcher",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--output",
default="",
type=str,
metavar="PATH",
help="path to output folder (default: none, current dir)",
)
parser.add_argument(
"--experiment",
default="",
type=str,
metavar="NAME",
help="name of train experiment, name of sub-folder for output",
)
parser.add_argument(
"--eval-metric",
default="top1",
type=str,
metavar="EVAL_METRIC",
help='Best metric (default: "top1"',
)
parser.add_argument(
"--tta",
type=int,
default=0,
metavar="N",
help="Test/inference time augmentation (oversampling) factor. 0=None (default: 0)",
)
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument(
"--use-multi-epochs-loader",
action="store_true",
default=False,
help="use the multi-epochs-loader to save time at the beginning of every epoch",
)
parser.add_argument(
"--torchscript",
dest="torchscript",
action="store_true",
help="convert model torchscript for inference",
)
parser.add_argument(
"--log-wandb",
action="store_true",
default=False,
help="log training and validation metrics to wandb",
)
# Distillation
parser.add_argument(
"--enable-distill",
type=str2bool,
nargs="?",
const=True,
default=False,
metavar="Boolean",
help="to use distillation",
)
parser.add_argument(
"--test-teacher",
type=str2bool,
nargs="?",
const=True,
default=False,
metavar="Boolean",
help="to test the teacher before training",
)
parser.add_argument(
"--teacher", default="", type=str, metavar="MODEL", help="Name of teacher model"
)
parser.add_argument(
"--teacher-checkpoint",
default="",
type=str,
metavar="CHECKPOINT PATH",
help="The checkpoint to the teacher model",
)
parser.add_argument(
"--teacher-img-size",
default=224,
type=int,
metavar="INT",
help="image resolution for teacher",
)
from timm.models.registry import register_model
from configs.model_hub import get_configs
from models.gpunet_builder import GPUNet_Builder
@register_model
def gpunet_2(pretrained=False, **kwargs):
"""Constructs GPUNet-2."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="1.75ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_1(pretrained=False, **kwargs):
"""Constructs GPUNet-1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.85ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_0(pretrained=False, **kwargs):
"""Constructs GPUNet-0."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.65ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_0",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d1(pretrained=False, **kwargs):
"""Constructs GPUNet-D1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="1.25ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d2(pretrained=False, **kwargs):
"""Constructs GPUNet-D2."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="2.25ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_p0(pretrained=False, **kwargs):
"""Constructs GPUNet-P0."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.5ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p0",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
@register_model
def gpunet_p1(pretrained=False, **kwargs):
"""Constructs GPUNet-P1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.8ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p1",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
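# With the GPUNet variants registered in timm above, the training script can be
# pointed at them via the --model flag (e.g. `--model gpunet_0`), which is
# forwarded to timm's create_model() in main() below.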
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, "r") as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
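# Illustrative example (added comment, not part of the original script): a YAML
# file passed via `-c my_config.yaml` may contain key/value pairs matching the
# argument names above, e.g.
#
#   model: gpunet_0
#   batch_size: 128
#   lr: 0.5
#   epochs: 300
#
# parser.set_defaults(**cfg) turns these into the new defaults, and anything
# given explicitly on the command line still takes precedence.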
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
                         for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def main():
setup_default_logging()
args, args_text = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning(
"You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`"
)
args.prefetcher = not args.no_prefetcher
args.distributed = False
if "WORLD_SIZE" in os.environ:
args.distributed = int(os.environ["WORLD_SIZE"]) > 1
args.device = "cuda:0"
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.local_rank = int(os.environ.get("LOCAL_RANK", args.local_rank))
args.device = "cuda:%d" % args.local_rank
torch.cuda.set_device(args.local_rank)
print("->setting device:", args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info(
"Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d."
% (args.rank, args.world_size)
)
else:
_logger.info("Training with a single process on 1 GPUs.")
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = "apex"
elif args.native_amp and has_native_amp:
use_amp = "native"
elif args.apex_amp or args.native_amp:
_logger.warning(
"Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6"
)
random_seed(args.seed, args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
)
if args.num_classes is None:
assert hasattr(
model, "num_classes"
), "Model must have `num_classes` attr if not set on cmd line/config."
args.num_classes = (
model.num_classes
)
if args.distributed:
torch.distributed.barrier()
if args.local_rank == 0:
_logger.info(
f"Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}"
)
print(model)
dllogger_dir = os.path.dirname(args.dllogger_name)
if dllogger_dir and not os.path.exists(dllogger_dir):
os.makedirs(dllogger_dir, exist_ok=True)
log_path = args.dllogger_name
dllogger.init(
backends=[
dllogger.JSONStreamBackend(verbosity=1, filename=log_path, append=True),
dllogger.JSONStreamBackend(verbosity=1, filename=unique_log_fpath(log_path)),
dllogger.StdOutBackend(verbosity=0),
]
)
else:
dllogger.init(backends=[])
dllogger.metadata("train_loss", {"unit": None})
dllogger.metadata("items_sec", {"unit": "images/s"})
dllogger.metadata("val_loss", {"unit": None})
dllogger.metadata("val_top1", {"unit": None})
dllogger.metadata("val_top5", {"unit": None})
dllogger.metadata("top1", {"unit": None})
dllogger.metadata("top5", {"unit": None})
dllogger.metadata("average_ips", {"unit": "images/s"})
data_config = resolve_data_config(
vars(args), model=model, verbose=args.local_rank == 0
)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, "A split of 1 makes no sense"
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp != "native":
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
"Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using "
"zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled."
)
if args.torchscript:
assert not use_amp == "apex", "Cannot use APEX AMP with torchscripted model"
assert not args.sync_bn, "Cannot use SyncBatchNorm with torchscripted model"
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == "apex":
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX AMP. Training in mixed precision.")
elif use_amp == "native":
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info("Using native Torch AMP. Training in mixed precision.")
else:
if args.local_rank == 0:
_logger.info("AMP not enabled. Training in float32.")
# optionally resume from a checkpoint
resume_epoch = None
if args.resume and os.path.isfile(args.resume):
resume_epoch = resume_checkpoint(
model,
args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0,
)
elif args.resume and not os.path.isfile(args.resume):
print("Warning, resume indicated, but file not found, starting training over")
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model,
decay=args.model_ema_decay,
device="cpu" if args.model_ema_force_cpu else None,
)
if args.resume and os.path.isfile(args.resume):
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp != "native":
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(
model, device_ids=[args.local_rank]
) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info("Scheduled epochs: {}".format(num_epochs))
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset,
root=args.data_dir,
split=args.train_split,
is_training=True,
batch_size=args.batch_size,
repeats=args.epoch_repeats,
)
dataset_eval = create_dataset(
args.dataset,
root=args.data_dir,
split=args.val_split,
is_training=False,
batch_size=args.batch_size,
)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0.0 or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.num_classes,
)
if args.prefetcher:
assert (
not num_aug_splits
) # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    # create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config["interpolation"]
_logger.info("Before creating loader from GPU: %s", args.local_rank)
student_res = data_config["input_size"]
useTwoRes = False
if student_res != data_config["input_size"]:
useTwoRes = True
loader_train = create_loader(
dataset_train,
input_size=data_config["input_size"],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
)
teacher_res = (3, args.teacher_img_size, args.teacher_img_size)
student_res = (3, args.img_size, args.img_size)
print(
"teacher eval resolution: ",
teacher_res,
" student resolution:",
student_res,
" train resolution:",
data_config["input_size"],
)
# setup loss function
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(
num_splits=num_aug_splits, smoothing=args.smoothing
).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
# setup the distillation
teacher_model = None
if args.enable_distill:
loader_teacher_eval = create_loader(
dataset_eval,
input_size=teacher_res,
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config["crop_pct"],
pin_memory=args.pin_mem,
)
if args.local_rank == 0:
_logger.info("#" * 10)
_logger.info("create distillation")
_logger.info("model: %s", args.teacher)
_logger.info("checkpoint: %s", args.teacher_checkpoint)
_logger.info("teacher image size: %s", args.teacher_img_size)
_logger.info("#" * 10)
assert args.teacher != ""
_logger.info("#####GPU: %s, reached the barrier", args.local_rank)
if args.distributed:
torch.distributed.barrier()
teacher_model = create_model(
args.teacher, pretrained=True, num_classes=args.num_classes, in_chans=3
)
teacher_model.cuda()
teacher_model.eval()
if args.test_teacher:
print("==start testing the teacher==")
if args.local_rank == 0 and args.test_teacher:
eval_metrics = validate(
teacher_model, loader_teacher_eval, validate_loss_fn, args
)
print(
"teacher evaluation results:",
" loss:",
eval_metrics["loss"],
" top1:",
eval_metrics["top1"],
" top5:",
eval_metrics["top5"],
)
if args.distributed:
torch.distributed.barrier()
loader_eval = create_loader(
dataset_eval,
input_size=student_res,
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config["crop_pct"],
pin_memory=args.pin_mem,
)
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
epoch_throughput = []
if args.local_rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = "-".join(
[
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config["input_size"][-1]),
]
)
exp_name = "checkpoints"
output_dir = get_outdir(
args.output if args.output else "./output/train", exp_name
)
decreasing = True if eval_metric == "loss" else False
saver = CheckpointSaver(
model=model,
optimizer=optimizer,
args=args,
model_ema=model_ema,
amp_scaler=loss_scaler,
checkpoint_dir=output_dir,
recovery_dir=output_dir,
decreasing=decreasing,
max_history=args.checkpoint_hist,
)
with open(os.path.join(output_dir, "args.yaml"), "w") as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, "set_epoch"):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch,
model,
loader_train,
optimizer,
train_loss_fn,
args,
lr_scheduler=lr_scheduler,
saver=saver,
output_dir=output_dir,
amp_autocast=amp_autocast,
loss_scaler=loss_scaler,
model_ema=model_ema,
mixup_fn=mixup_fn,
teacher_model=teacher_model,
student_res=student_res,
useTwoRes=useTwoRes,
benchmark_steps=args.benchmark_steps,
)
epoch_throughput.append(train_metrics["items_sec"])
dllogger.log(step=epoch, data={"train_loss": train_metrics["loss"], "items_sec": train_metrics["items_sec"]}, verbosity=1)
dllogger.log(step=(), data={"train_loss": train_metrics["loss"], "items_sec": train_metrics["items_sec"]}, verbosity=1)
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == "reduce")
eval_metrics = validate(
model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast
)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
distribute_bn(model_ema, args.world_size, args.dist_bn == "reduce")
ema_eval_metrics = validate(
model_ema.module,
loader_eval,
validate_loss_fn,
args,
amp_autocast=amp_autocast,
log_suffix=" (EMA)",
)
eval_metrics = ema_eval_metrics
dllogger.log(step=epoch, data={"val_loss": eval_metrics["loss"], "val_top1": eval_metrics["top1"], "val_top5": eval_metrics["top5"]}, verbosity=1)
dllogger.log(step=(), data={"val_loss": eval_metrics["loss"], "val_top1": eval_metrics["top1"], "val_top5": eval_metrics["top5"]}, verbosity=1)
dllogger.flush()
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch,
train_metrics,
eval_metrics,
os.path.join(output_dir, "summary.csv"),
write_header=best_metric is None,
log_wandb=args.log_wandb and has_wandb,
)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(
epoch, metric=save_metric
)
if len(epoch_throughput) > 0:
mean_train_throughput = sum(epoch_throughput) / len(epoch_throughput)
else:
mean_train_throughput = 0
log_metrics = dict(eval_metrics)
log_metrics["average_ips"] = mean_train_throughput
dllogger.log(step=tuple(), data=log_metrics, verbosity=0)
dllogger.flush()
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info("*** Best metric: {0} (epoch {1})".format(best_metric, best_epoch))
def train_one_epoch(
epoch,
model,
loader,
optimizer,
loss_fn,
args,
lr_scheduler=None,
saver=None,
output_dir=None,
amp_autocast=suppress,
loss_scaler=None,
model_ema=None,
mixup_fn=None,
teacher_model=None,
student_res=None,
useTwoRes=False,
benchmark_steps=None,
):
if teacher_model is not None:
assert student_res is not None
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, "is_second_order") and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
if teacher_model is not None:
teacher_model.eval()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
rate_avg = 0
for batch_idx, (input, target) in enumerate(loader):
last_batch = (batch_idx == last_idx) or (batch_idx == benchmark_steps)
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
if teacher_model is not None and useTwoRes:
student_input = F.interpolate(
input, size=(student_res[1], student_res[2]), mode="bicubic"
)
with amp_autocast():
if teacher_model is not None and useTwoRes:
output = model(student_input)
else:
output = model(input)
loss = loss_fn(output, target)
if teacher_model is not None:
with torch.no_grad():
soft_logits = teacher_model(input).detach()
soft_label = F.softmax(soft_logits, dim=1)
kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
loss = kd_loss + loss
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss,
optimizer,
clip_grad=args.clip_grad,
clip_mode=args.clip_mode,
parameters=model_parameters(
model, exclude_head="agc" in args.clip_mode
),
create_graph=second_order,
)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head="agc" in args.clip_mode),
value=args.clip_grad,
mode=args.clip_mode,
)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group["lr"] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
rate_avg = input.size(0) * args.world_size / batch_time_m.avg
if args.local_rank == 0:
_logger.info(
"{} Train: {} [{:>4d}/{} ({:>3.0f}%)] "
"Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) "
"Time: {batch_time.val:.3f}s, {rate:>7.2f}/s "
"({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) "
"LR: {lr:.3e} "
"Data: {data_time.val:.3f} ({data_time.avg:.3f})".format(
datetime.now().strftime("%d.%b %Y %H:%M:%S"),
epoch,
batch_idx,
len(loader),
100.0 * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m,
)
)
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, "train-batch-%d.jpg" % batch_idx),
padding=0,
normalize=True,
)
if (
saver is not None
and args.recovery_interval
and (last_batch or (batch_idx + 1) % args.recovery_interval == 0)
):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if (batch_idx == benchmark_steps):
break
if hasattr(optimizer, "sync_lookahead"):
optimizer.sync_lookahead()
return OrderedDict([("loss", losses_m.avg), ("items_sec", rate_avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=""):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0 : target.size(0) : reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (
last_batch or batch_idx % args.log_interval == 0
):
log_name = "Test" + log_suffix
_logger.info(
"{0}: [{1:>4d}/{2}] "
"Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) "
"Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) "
"Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})".format(
log_name,
batch_idx,
last_idx,
batch_time=batch_time_m,
loss=losses_m,
top1=top1_m,
top5=top5_m,
)
)
metrics = OrderedDict(
[("loss", losses_m.avg), ("top1", top1_m.avg), ("top5", top5_m.avg)]
)
return metrics
if __name__ == "__main__":
main()
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops | ops | roi_ops | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROI-related ops."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.utils import box_utils
def _propose_rois_gpu(scores,
boxes,
anchor_boxes,
height,
width,
scale,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights):
"""Proposes RoIs giva group of candidates (GPU version).
Args:
scores: a tensor with a shape of [batch_size, num_boxes].
boxes: a tensor with a shape of [batch_size, num_boxes, 4],
in the encoded form.
anchor_boxes: an Anchors object that contains the anchors with a shape of
[batch_size, num_boxes, 4].
height: a tensor of shape [batch_size, 1, 1] representing the image height.
width: a tensor of shape [batch_size, 1, 1] representing the image width.
scale: a tensor of shape [batch_size, 1, 1] representing the image scale.
        rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
            before applying NMS. This is *per FPN level* (not total).
        rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
            after applying NMS. This is the total number of RPN proposals produced.
        rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
            used on RPN proposals.
        rpn_min_size: an integer number as the minimum proposal height and width,
            as both need to be greater than this number. Note that this number is
            at the original image scale, not the scale used during training or inference.
        bbox_reg_weights: None or a list of four integers specifying the weights used
            when decoding the box.
Returns:
scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
            representing the scores of the proposals. It has the same dtype as the
            input scores.
boxes: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
            representing the boxes of the proposals. The boxes are in normalized
            coordinates with a form of [ymin, xmin, ymax, xmax]. It has the same
            dtype as the input boxes.
"""
batch_size, num_boxes = scores.get_shape().as_list()
topk_limit = min(num_boxes, rpn_pre_nms_topn)
boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)
boxes = box_utils.clip_boxes(boxes, height, width)
if rpn_min_size > 0.0:
boxes, scores = box_utils.filter_boxes(
boxes,
tf.expand_dims(scores, axis=-1),
rpn_min_size,
height,
width,
scale
)
scores = tf.squeeze(scores, axis=-1)
post_nms_topk_limit = topk_limit if topk_limit < rpn_post_nms_topn else rpn_post_nms_topn
if rpn_nms_threshold > 0:
# Normalize coordinates as combined_non_max_suppression currently
# only support normalized coordinates.
pre_nms_boxes = box_utils.to_normalized_coordinates(boxes, height, width)
pre_nms_boxes = tf.reshape(pre_nms_boxes, [batch_size, num_boxes, 1, 4])
pre_nms_scores = tf.reshape(scores, [batch_size, num_boxes, 1])
# fixed problems when running with Keras AMP
pre_nms_boxes = tf.cast(pre_nms_boxes, dtype=tf.float32)
pre_nms_scores = tf.cast(pre_nms_scores, dtype=tf.float32)
with tf.device('CPU:0'):
boxes, scores, _, _ = tf.image.combined_non_max_suppression(
pre_nms_boxes,
pre_nms_scores,
max_output_size_per_class=topk_limit,
max_total_size=post_nms_topk_limit,
iou_threshold=rpn_nms_threshold,
score_threshold=0.0,
pad_per_class=False
)
boxes = box_utils.to_absolute_coordinates(boxes, height, width)
else:
scores, boxes = box_utils.top_k(scores, k=post_nms_topk_limit, boxes_list=[boxes])
boxes = boxes[0]
return scores, boxes
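# Illustrative shape walk-through (added comment; the numbers are only an
# example): for one FPN level with a 32x32 feature map and 3 anchors per
# location, num_boxes = 32*32*3 = 3072. With rpn_pre_nms_topn=2000 and
# rpn_post_nms_topn=1000, topk_limit becomes min(3072, 2000) = 2000 (used as
# the per-class NMS output cap) and post_nms_topk_limit becomes 1000, so
# _propose_rois_gpu returns at most 1000 proposals per image for this level;
# the per-level results are concatenated and reduced again by the final top-k
# in multilevel_propose_rois below.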
def multilevel_propose_rois(scores_outputs,
box_outputs,
all_anchors,
image_info,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights):
"""Proposes RoIs given a group of candidates from different FPN levels.
Args:
scores_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in
[batch_size, height, width, num_anchors * 4]
all_anchors: an Anchors object that contains the all anchors.
        image_info: a tensor of shape [batch_size, 5] where the five columns
            encode the input image's [height, width, scale, original_height,
            original_width]. Height and width are for the input to the network,
            not the original image; scale is the scale factor used to scale the
            network input size to the original image size. The last two are the
            original height and width. See dataloader.DetectionInputProcessor
            for details.
        rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
            before applying NMS. This is *per FPN level* (not total).
        rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
            after applying NMS. This is the total number of RPN proposals produced.
        rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
            used on RPN proposals.
        rpn_min_size: an integer number as the minimum proposal height and width,
            as both need to be greater than this number. Note that this number is
            at the original image scale, not the scale used during training or inference.
        bbox_reg_weights: None or a list of four integers specifying the weights used
            when decoding the box.
Returns:
scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
representing the scores of the proposals.
rois: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
representing the boxes of the proposals. The boxes are in normalized
coordinates with a form of [ymin, xmin, ymax, xmax].
"""
with tf.name_scope('multilevel_propose_rois'):
levels = scores_outputs.keys()
scores = []
rois = []
anchor_boxes = all_anchors.get_unpacked_boxes()
height = tf.expand_dims(image_info[:, 0:1], axis=-1)
width = tf.expand_dims(image_info[:, 1:2], axis=-1)
scale = tf.expand_dims(image_info[:, 2:3], axis=-1)
for level in levels:
with tf.name_scope('level_%d' % level) as scope:
batch_size, feature_h, feature_w, num_anchors_per_location = scores_outputs[level].get_shape().as_list()
num_boxes = feature_h * feature_w * num_anchors_per_location
this_level_scores = tf.reshape(scores_outputs[level], [batch_size, num_boxes])
this_level_scores = tf.sigmoid(this_level_scores)
this_level_boxes = tf.reshape(box_outputs[level], [batch_size, num_boxes, 4])
this_level_anchors = tf.cast(
tf.reshape(
tf.expand_dims(anchor_boxes[level], axis=0) *
tf.ones([batch_size, 1, 1, 1]),
[batch_size, num_boxes, 4]
),
dtype=this_level_scores.dtype
)
this_level_scores, this_level_boxes = _propose_rois_gpu(
this_level_scores,
this_level_boxes,
this_level_anchors,
height,
width,
scale,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights
)
scores.append(this_level_scores)
rois.append(this_level_boxes)
scores = tf.concat(scores, axis=1)
rois = tf.concat(rois, axis=1)
with tf.name_scope('roi_post_nms_topk'):
post_nms_num_anchors = scores.shape[1]
post_nms_topk_limit = min(post_nms_num_anchors, rpn_post_nms_topn)
top_k_scores, top_k_rois = box_utils.top_k(
scores,
k=post_nms_topk_limit,
boxes_list=[rois]
)
top_k_rois = top_k_rois[0]
return top_k_scores, top_k_rois
|
TensorFlow2/Classification/ConvNets/utils | utils | optimizer_factory | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer factory for vision tasks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
import tensorflow_addons as tfa
from typing import Any, Dict, Text, List
from tensorflow import keras
from tensorflow_addons.optimizers import MovingAverage
# pylint: disable=protected-access
from utils import learning_rate
def fetch_optimizer(model,opt_type) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
# this is the case where our target optimizer is not wrapped by any other optimizer(s)
if isinstance(model.optimizer,opt_type):
return model.optimizer
# Dive into nested optimizer object until we reach the target opt
opt = model.optimizer
while hasattr(opt, '_optimizer'):
opt = opt._optimizer
if isinstance(opt,opt_type):
return opt
raise TypeError(f'Failed to find {opt_type} in the nested optimizer object')
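# Example (added comment): if `model.optimizer` is a MovingAverage (or other)
# wrapper around SGD, `fetch_optimizer(model, tf.keras.optimizers.SGD)` walks
# the nested `_optimizer` attributes and returns the inner SGD instance.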
# Inspired from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py
class GradientAccumulator(object):
"""Distribution strategies-aware gradient accumulation utility."""
def __init__(self):
"""Initializes the accumulator."""
self._gradients = []
self._accum_steps = tf.Variable(
initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
)
@property
def step(self):
"""Number of accumulated steps."""
return self._accum_steps.value()
@property
def gradients(self):
"""The accumulated gradients."""
return list(
gradient.value() if gradient is not None else gradient for gradient in self._get_replica_gradients()
)
def __call__(self, gradients):
"""Accumulates :obj:`gradients`."""
if not self._gradients:
self._gradients.extend(
[
tf.Variable(tf.zeros_like(gradient), trainable=False) if gradient is not None else gradient
for gradient in gradients
]
)
if len(gradients) != len(self._gradients):
raise ValueError("Expected %s gradients, but got %d" % (len(self._gradients), len(gradients)))
for accum_gradient, gradient in zip(self._get_replica_gradients(), gradients):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(gradient)
self._accum_steps.assign_add(1)
def reset(self):
"""Resets the accumulated gradients."""
if self._gradients:
self._accum_steps.assign(0)
for gradient in self._get_replica_gradients():
if gradient is not None:
gradient.assign(tf.zeros_like(gradient))
def normalize(self):
"""Normalizes the accumulated gradients."""
for gradient in self._get_replica_gradients():
if gradient is not None:
gradient.assign(gradient*tf.cast(1/self._accum_steps, gradient.dtype))
def _get_replica_gradients(self):
if tf.distribute.has_strategy():
# In a replica context, we want to accumulate gradients on each replica
# without synchronization, so we directly assign the value of the
# current replica.
replica_context = tf.distribute.get_replica_context()
if replica_context is None or tf.distribute.get_strategy().num_replicas_in_sync == 1:
return self._gradients
return (
gradient.device_map.select_for_current_replica(gradient.values, replica_context)
for gradient in self._gradients
if gradient is not None
)
else:
return self._gradients
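# --- Illustrative sketch (added; not part of the original module) ------------
# A minimal example of how GradientAccumulator could be combined with a
# standard tf.keras optimizer to simulate a larger effective batch size. The
# function name and the `accum_steps` argument are assumptions for
# illustration only; the training loops in this repository wire the
# accumulator up differently.
def _example_accumulated_train_step(model, optimizer, accumulator, images, labels, accum_steps=4):
    """Accumulate gradients over `accum_steps` micro-batches, then apply them once."""
    with tf.GradientTape() as tape:
        # Assumes `model` outputs class probabilities (softmax) for sparse labels.
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, model(images, training=True))
        )
    # Add this micro-batch's gradients to the running sums.
    accumulator(tape.gradient(loss, model.trainable_variables))
    if accumulator.step % accum_steps == 0:
        accumulator.normalize()  # turn the accumulated sums into an average
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()
    return loss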
class HvdMovingAverage(MovingAverage):
def swap_weights(self):
"""Swap the average and moving weights.
The original function in the parent class assumes a cross replica
context, which fails for single GPU training. It also failed in the case of
multi-GPU training with Horovod.
"""
self._swap_weights()
def _create_slots(self, var_list):
"""[summary]
The original function in the parent class, in addition to calling
_create_slots() of the base optimizer, reassigns trainable tensors to
self._average_weights and self._model_weights, which has the effect of
removing non-trainable tensors (e.g., moving means and variances) from EMA.
By overriding it, we simply keep the part that calls _create_slots of the base
optimizer. To make up for the removed part of the code, we call shadow_copy, which
assigns both trainable and non-trainable tensors to self._average_weights and
self._model_weights.
Args:
var_list ([type]): [description]
"""
self._optimizer._create_slots(var_list=var_list)
def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):
self._optimizer._iterations = self.iterations
result = super().apply_gradients(grads_and_vars, name)
# update EMA weights after the weights are updated
self.update_average(self._optimizer.iterations)
return result
def _resource_apply_dense(self, grad, var):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse(grad, var, indices)
def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, var, indices)
@tf.function
def update_average(self, step: tf.Tensor):
step = tf.cast(step, tf.float32)
average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
if step < self._start_step:
decay = tf.constant(0., tf.float32)
elif self._dynamic_decay:
decay = step - self._start_step
decay = tf.minimum(average_decay, (1. + decay) / (10. + decay))
else:
decay = average_decay
def _apply_moving(v_moving, v_normal):
diff = v_moving - v_normal
v_moving.assign_sub(tf.cast(1. - decay, v_moving.dtype) * diff)
return v_moving
def _update(strategy, v_moving_and_v_normal):
for v_moving, v_normal in v_moving_and_v_normal:
strategy.extended.update(v_moving, _apply_moving, args=(v_normal,))
ctx = tf.distribute.get_replica_context()
return ctx.merge_call(_update, args=(zip(self._average_weights,
self._model_weights),))
@classmethod
def from_config(cls, config, custom_objects=None):
optimizer = tf.keras.optimizers.deserialize(
config.pop('optimizer'),
custom_objects=custom_objects,
)
# For some reason, it is necessary to pass the optimizer as a keyword arg
return cls(optimizer=optimizer, **config)
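# Sketch of typical HvdMovingAverage usage (illustrative values): wrap a base optimizer
# so EMA weights are maintained during training, then swap them in for evaluation and
# back again afterwards.
#
#   ema_opt = HvdMovingAverage(tf.keras.optimizers.SGD(0.1),
#                              average_decay=0.999, dynamic_decay=True)
#   ...train with ema_opt...
#   ema_opt.swap_weights()  # evaluate with the averaged weights
#   ema_opt.swap_weights()  # restore the raw training weights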
def build_optimizer(
optimizer_name: Text,
base_learning_rate: tf.keras.optimizers.schedules.LearningRateSchedule,
params: Dict[Text, Any]):
"""Build the optimizer based on name.
Args:
optimizer_name: String representation of the optimizer name. Examples:
sgd, momentum, rmsprop.
base_learning_rate: `tf.keras.optimizers.schedules.LearningRateSchedule`
base learning rate.
params: String -> Any dictionary representing the optimizer params.
This should contain optimizer specific parameters such as
`base_learning_rate`, `decay`, etc.
Returns:
A tf.keras.Optimizer.
Raises:
ValueError if the provided optimizer_name is not supported.
"""
optimizer_name = optimizer_name.lower()
if optimizer_name == 'sgd':
nesterov = params.get('nesterov', False)
optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate,
nesterov=nesterov)
elif optimizer_name == 'momentum':
nesterov = params.get('nesterov', False)
optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate,
momentum=params['momentum'],
nesterov=nesterov)
elif optimizer_name == 'rmsprop':
rho = params.get('decay', None) or params.get('rho', 0.9)
momentum = params.get('momentum', 0.9)
epsilon = params.get('epsilon', 1e-07)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon)
elif optimizer_name == 'adam':
beta_1 = params.get('beta_1', 0.9)
beta_2 = params.get('beta_2', 0.999)
epsilon = params.get('epsilon', 1e-07)
optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
elif optimizer_name == 'adamw':
weight_decay = params.get('weight_decay', 0.01)
beta_1 = params.get('beta_1', 0.9)
beta_2 = params.get('beta_2', 0.999)
epsilon = params.get('epsilon', 1e-07)
optimizer = tfa.optimizers.AdamW(weight_decay=weight_decay,
learning_rate=base_learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
else:
raise ValueError('Unknown optimizer %s' % optimizer_name)
if params.get('lookahead', None):
optimizer = tfa.optimizers.Lookahead(optimizer)
# Moving average should be applied last, as it's applied at test time
moving_average_decay = params.get('moving_average_decay', 0.)
if moving_average_decay is not None and moving_average_decay > 0.:
optimizer = HvdMovingAverage(  # used in place of tfa.optimizers.MovingAverage
optimizer,
average_decay=moving_average_decay,
dynamic_decay=True)
return optimizer
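# Example call (hypothetical parameter values): SGD with momentum, wrapped in EMA
# weight averaging, built from a config-style dictionary.
#
#   opt = build_optimizer(
#       optimizer_name='momentum',
#       base_learning_rate=0.1,
#       params={'momentum': 0.9, 'nesterov': True, 'moving_average_decay': 0.9999})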
def build_learning_rate(params: Dict[Text, Any],
batch_size: int = None,
train_steps: int = None,
max_epochs: int = None):
"""Build the learning rate given the provided configuration."""
decay_type = params['name']
base_lr = params['initial_lr']
decay_rate = params['decay_rate']
if params['decay_epochs'] is not None:
decay_steps = params['decay_epochs'] * train_steps
else:
decay_steps = 0
if params['warmup_epochs'] is not None:
warmup_steps = params['warmup_epochs'] * train_steps
else:
warmup_steps = 0
lr_multiplier = params['scale_by_batch_size']
if lr_multiplier and lr_multiplier > 0:
# Scale the learning rate based on the batch size and a multiplier
base_lr *= lr_multiplier * batch_size
if decay_type == 'exponential':
lr = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=params['staircase'])
elif decay_type == 'piecewise_constant_with_warmup':
lr = learning_rate.PiecewiseConstantDecayWithWarmup(
batch_size=batch_size,
epoch_size=params['examples_per_epoch'],
warmup_epochs=params['warmup_epochs'],
boundaries=params['boundaries'],
multipliers=params['multipliers'])
elif decay_type == 'cosine':
decay_steps = (max_epochs - params['warmup_epochs']) * train_steps
lr = tf.keras.experimental.CosineDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
alpha=0.0
)
elif decay_type == 'linearcosine':
decay_steps = (max_epochs - params['warmup_epochs']) * train_steps
lr = tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
initial_variance=0.5,
variance_decay=0.55,
num_periods=0.5, alpha=0.0, beta=0.001
)
if warmup_steps > 0:
if decay_type != 'piecewise_constant_with_warmup':
lr = learning_rate.WarmupDecaySchedule(lr, warmup_steps)
return lr
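# Example call (hypothetical config values): cosine decay with a 5-epoch warmup.
#
#   lr_params = {'name': 'cosine', 'initial_lr': 0.1, 'decay_rate': None,
#                'decay_epochs': None, 'warmup_epochs': 5,
#                'scale_by_batch_size': 0.0, 'staircase': False}
#   schedule = build_learning_rate(lr_params, batch_size=256,
#                                  train_steps=1000, max_epochs=300)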
|
TensorFlow2/LanguageModeling/BERT/data | data | bertPrep | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting
import PubMedTextFormatting
import argparse
import itertools
import multiprocessing
import os
import pprint
import subprocess
def main(args):
working_dir = os.environ['BERT_PREP_WORKING_DIR']
print('Working Directory:', working_dir)
print('Action:', args.action)
print('Dataset Name:', args.dataset)
if args.input_files:
args.input_files = args.input_files.split(',')
hdf5_tfrecord_folder_prefix = "/lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
+ "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
+ "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor) \
+ "_shard_" + str(args.n_training_shards) + "_test_split_" + str(int(args.fraction_test_set * 100))
directory_structure = {
'download' : working_dir + '/download', # Downloaded and decompressed
'extracted' : working_dir +'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor)
'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same
'sharded' : working_dir + '/sharded',
'tfrecord' : working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix,
'hdf5': working_dir + '/hdf5'+ hdf5_tfrecord_folder_prefix,
}
print('\nDirectory Structure:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(directory_structure)
print('')
if args.action == 'download':
if not os.path.exists(directory_structure['download']):
os.makedirs(directory_structure['download'])
downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
downloader.download()
elif args.action == 'text_formatting':
assert args.dataset not in {'google_pretrained_weights', 'nvidia_pretrained_weights', 'squad',
'mrpc', 'cola', 'mnli', 'sst-2'}, 'Cannot perform text_formatting on pretrained weights'
if not os.path.exists(directory_structure['extracted']):
os.makedirs(directory_structure['extracted'])
if not os.path.exists(directory_structure['formatted']):
os.makedirs(directory_structure['formatted'])
if args.dataset == 'bookscorpus':
books_path = directory_structure['download'] + '/bookscorpus'
#books_path = directory_structure['download']
output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
books_formatter.merge()
elif args.dataset == 'wikicorpus_en':
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = 'python -m wikiextractor.WikiExtractor'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'wikicorpus_zh':
assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.'
if args.skip_wikiextractor == 0:
path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
print('WikiExtractor Command:', wikiextractor_command)
wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
wiki_formatter.merge()
elif args.dataset == 'pubmed_baseline':
pubmed_path = directory_structure['download'] + '/pubmed' + '/baseline'
output_filename = directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt'
pubmed_formatter = PubMedTextFormatting.PubMedTextFormatting(pubmed_path, output_filename, recursive=True)
pubmed_formatter.merge()
elif args.action == 'sharding':
# Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset or 'pubmed' in args.dataset:
if args.input_files is None:
if args.dataset == 'bookscorpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
elif args.dataset == 'wikicorpus_en':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
elif args.dataset == 'wikicorpus_zh':
args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
elif args.dataset == 'books_wiki_en_corpus':
args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
elif args.dataset == 'pubmed_baseline':
args.input_files = [directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt']
output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset
if not os.path.exists(directory_structure['sharded']):
os.makedirs(directory_structure['sharded'])
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset)
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/training'):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/training')
if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/test'):
os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/test')
# Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
# it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
# Different languages (e.g., Chinese simplified/traditional) may require translation and
# other packages to be called from here -- just add a conditional branch for those extra steps
segmenter = TextSharding.NLTKSegmenter()
sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
else:
assert False, 'Unsupported dataset for sharding'
elif args.action == 'create_tfrecord_files':
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/training'):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/training')
if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/test'):
os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/test')
last_process = None
def create_record_worker(filename_prefix, shard_id, output_format='tfrecord', split='training'):
bert_preprocessing_command = 'python /workspace/bert_tf2/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(output_file_prefix + '_training', i, 'tfrecord', 'training')
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i, 'tfrecord', 'test')
last_process.wait()
elif args.action == 'create_hdf5_files':
assert False, 'HDF5 format not fully supported in this release.'
if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset):
os.makedirs(directory_structure['hdf5'] + "/" + args.dataset)
last_process = None
def create_record_worker(filename_prefix, shard_id, output_format='hdf5'):
bert_preprocessing_command = 'python /workspace/bert_tf2/create_pretraining_data.py'
bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)
last_process = bert_preprocessing_process
# This could be better optimized (fine if all take equal time)
if shard_id % args.n_processes == 0 and shard_id > 0:
bert_preprocessing_process.wait()
return last_process
output_file_prefix = args.dataset
for i in range(args.n_training_shards):
last_process = create_record_worker(output_file_prefix + '_training', i)
last_process.wait()
for i in range(args.n_test_shards):
last_process = create_record_worker(output_file_prefix + '_test', i)
last_process.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Preprocessing Application for Everything BERT-related'
)
parser.add_argument(
'--action',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
choices={
'download', # Download and verify md5/sha sums
'text_formatting', # Convert into a file that contains one article/book per line
'sharding', # Convert previously formatted text into shards containing one sentence per line
'create_tfrecord_files', # Turn each shard into a TFRecord with masking and next sentence prediction info
'create_hdf5_files' # Turn each shard into an HDF5 file with masking and next sentence prediction info
}
)
parser.add_argument(
'--dataset',
type=str,
help='Specify the dataset to perform --action on',
choices={
'bookscorpus',
'wikicorpus_en',
'wikicorpus_zh',
'books_wiki_en_corpus',
'pubmed_baseline',
'pubmed_daily_update',
'pubmed_fulltext',
'pubmed_open_access',
'google_pretrained_weights',
'nvidia_pretrained_weights',
'squad',
'mrpc',
'sst-2',
'mnli',
'cola',
'all'
}
)
parser.add_argument(
'--input_files',
type=str,
help='Specify the input files in a comma-separated list (no spaces)'
)
parser.add_argument(
'--n_training_shards',
type=int,
help='Specify the number of training shards to generate',
default=1472
)
parser.add_argument(
'--n_test_shards',
type=int,
help='Specify the number of test shards to generate',
default=1472
)
parser.add_argument(
'--fraction_test_set',
type=float,
help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
default=0.1
)
parser.add_argument(
'--segmentation_method',
type=str,
help='Specify your choice of sentence segmentation',
choices={
'nltk'
},
default='nltk'
)
parser.add_argument(
'--n_processes',
type=int,
help='Specify the max number of processes to allow at one time',
default=4
)
parser.add_argument(
'--random_seed',
type=int,
help='Specify the base seed to use for any random number generation',
default=12345
)
parser.add_argument(
'--dupe_factor',
type=int,
help='Specify the duplication factor',
default=5
)
parser.add_argument(
'--masked_lm_prob',
type=float,
help='Specify the probability for masked lm',
default=0.15
)
parser.add_argument(
'--max_seq_length',
type=int,
help='Specify the maximum sequence length',
default=512
)
parser.add_argument(
'--max_predictions_per_seq',
type=int,
help='Specify the maximum number of masked words per sequence',
default=20
)
parser.add_argument(
'--do_lower_case',
type=int,
help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
default=1
)
parser.add_argument(
'--vocab_file',
type=str,
help='Specify absolute path to vocab file to use'
)
parser.add_argument(
'--skip_wikiextractor',
type=int,
help='Specify whether to skip wikiextractor step 0=False, 1=True',
default=0
)
parser.add_argument(
'--interactive_json_config_generator',
type=str,
help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
)
args = parser.parse_args()
main(args)
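# Example end-to-end invocation (illustrative; assumes BERT_PREP_WORKING_DIR is exported
# and a WordPiece vocab file is available):
#
#   python bertPrep.py --action download --dataset wikicorpus_en
#   python bertPrep.py --action text_formatting --dataset wikicorpus_en
#   python bertPrep.py --action sharding --dataset wikicorpus_en
#   python bertPrep.py --action create_tfrecord_files --dataset wikicorpus_en \
#       --vocab_file <path/to/vocab.txt> --do_lower_case 1 --max_seq_length 512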
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer | maintainer | exceptions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
|
PyTorch/Classification/GPUNet/triton | triton | model | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from timm.models.helpers import load_checkpoint
import os
import json
from models.gpunet_builder import GPUNet_Builder
def update_argparser(parser):
parser.add_argument(
"--config", type=str, required=True, help="Network to deploy")
parser.add_argument(
"--checkpoint", type=str, help="The checkpoint of the model. ")
parser.add_argument("--precision", type=str, default="fp32",
choices=["fp32", "fp16"], help="Inference precision")
parser.add_argument(
"--is-prunet", type=bool, required=True, help="Bool on whether network is a prunet")
def get_model(**model_args):
dtype = model_args['precision']
checkpoint = model_args['checkpoint']
configPath = model_args['config']
with open(configPath) as configFile:
modelJSON = json.load(configFile)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
if dtype == 'fp16':
dtype = torch.float16
elif dtype == 'fp32':
dtype = torch.float32
else:
raise NotImplementedError
if model_args['is_prunet'] == "True":
model.load_state_dict(torch.load(checkpoint))
else:
load_checkpoint(model, checkpoint, use_ema=True)
model = model.to('cuda', dtype)
model.eval()
tensor_names = {"inputs": ["INPUT__0"],
"outputs": ["OUTPUT__0"]}
return model, tensor_names
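# Usage sketch (hypothetical paths; requires a CUDA device since the model is moved
# to 'cuda'): load a GPUNet checkpoint for Triton deployment.
#
#   model, tensor_names = get_model(precision="fp16",
#                                   checkpoint="<path/to/checkpoint.pth>",
#                                   config="<path/to/model_config.json>",
#                                   is_prunet=False)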
|
PyTorch/Classification/ConvNets/triton/scripts/docker | docker | interactive | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker run -it --rm \
--gpus "device=all" \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-e WORKDIR=$(pwd) \
-e PYTHONPATH=$(pwd) \
-v $(pwd):$(pwd) \
-w $(pwd) \
resnet50:latest bash
|
TensorFlow/Classification/ConvNets | ConvNets | inference | import argparse
import os
import pathlib
import time
import tempfile
import tensorflow as tf
import numpy as np
from tensorflow.python.compiler.tensorrt import trt_convert as trt
import dllogger
from runtime import runner_utils
from runtime import runner
from model.resnet import model_architectures
from utils import data_utils
from utils import hvd_wrapper as hvd
OUTPUT_SAVED_MODEL_PATH = tempfile.mkdtemp(prefix="tftrt-converted")
LOG_FREQUENCY = 100
def argument_parser() -> argparse.Namespace:
parser = argparse.ArgumentParser()
exclusive_args = parser.add_mutually_exclusive_group()
exclusive_args.add_argument("--model", type=str, default=None, help="Saved model location to use for inference")
exclusive_args.add_argument("--architecture", type=str, choices=model_architectures.keys())
parser.add_argument("--log-path", type=str, default="./log.json", help="Path to log file")
parser.add_argument("--tf-trt", action="store_true", default=False, help="Use TF-TRT for inference")
parser.add_argument("--amp", action="store_true", default=False, help="Use AMP for inference")
parser.add_argument("--data-dir", type=str, required=False,
default=None, help="Localization of validation data")
parser.add_argument("--batch-size", type=int, default=1, help="Batch size for inference")
return parser.parse_args()
def main(args: argparse.Namespace):
hvd.init()
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
])
dllogger.log(data=vars(args), step='PARAMETER')
dllogger.metadata("throughput", {"unit": "images/s"})
dllogger.metadata("accuracy", {"unit": None})
if args.model is None:
saved_model_to_load = tempfile.mkdtemp(prefix="tftrt-savedmodel")
r = runner.Runner(n_classes=1001, architecture=args.architecture, use_tf_amp=args.amp,
model_dir=saved_model_to_load)
r.train("batch", 1, 1, args.batch_size, is_benchmark=True)
r.evaluate("batch", 1, args.batch_size, export_dir=saved_model_to_load,
is_benchmark=True)
saved_model_to_load = r.exported_path.decode("utf-8")
else:
saved_model_to_load = args.model
output_tensor_name = "y_preds_ref:0" if not args.tf_trt else "ArgMax:0"
batch_size = args.batch_size
if args.tf_trt:
converter = trt.TrtGraphConverter(input_saved_model_dir=str(saved_model_to_load),
precision_mode="FP16" if args.amp else "FP32")
converter.convert()
converter.save(OUTPUT_SAVED_MODEL_PATH)
saved_model_to_load = OUTPUT_SAVED_MODEL_PATH
elif args.amp:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
if args.data_dir is not None:
filenames, _, num_steps, _, _ = runner_utils.parse_tfrecords_dataset(
data_dir=str(args.data_dir),
mode="validation",
iter_unit="epoch",
num_iter=1,
global_batch_size=batch_size,
)
dataset = data_utils.get_tfrecords_input_fn(filenames=filenames,
batch_size=batch_size,
height=224,
width=224,
training=False,
distort_color=False,
num_threads=1,
deterministic=True)
iterator = dataset.make_initializable_iterator()
next_item = iterator.get_next()
else:
num_steps=60000 / batch_size
with tf.Session() as sess:
if args.data_dir is not None:
sess.run(iterator.initializer)
tf.saved_model.loader.load(sess,
[tf.saved_model.tag_constants.SERVING],
str(saved_model_to_load))
try:
start_time = time.time()
last_time = start_time
image_processed = 0
image_correct = 0
for samples_processed in range(int(num_steps)):
if args.data_dir is not None:
next_batch_image, next_batch_target = sess.run(next_item)
else:
if samples_processed == 0:
next_batch_image = np.random.normal(size=(batch_size, 224, 224, 3))
next_batch_target = np.random.randint(0, 1000, size=(batch_size,))
output = sess.run([output_tensor_name], feed_dict={"input_tensor:0": next_batch_image})
image_processed += args.batch_size
image_correct += np.sum(output == next_batch_target)
if samples_processed % LOG_FREQUENCY == 0 and samples_processed != 0:
current_time = time.time()
current_throughput = LOG_FREQUENCY * batch_size / (current_time - last_time)
dllogger.log(step=(0, samples_processed), data={"throughput": current_throughput})
last_time = current_time
except tf.errors.OutOfRangeError:
pass
finally:
dllogger.log(step=tuple(), data={"throughput": image_processed / (last_time - start_time),
"accuracy": image_correct / image_processed})
if __name__ == "__main__":
main(argument_parser())
|
TensorFlow2/LanguageModeling/ELECTRA/data | data | __init__ | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
TensorFlow/Classification/ConvNets/model/layers | layers | math_ops | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
__all__ = ['reduce_mean']
def reduce_mean(inputs, keepdims=None, data_format='NHWC', name='spatial_mean'):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
axes = [1, 2] if data_format == 'NHWC' else [2, 3]
net = tf.math.reduce_mean(inputs, axis=axes, keepdims=keepdims, name=name)
return net
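# Example: global average pooling over the spatial dimensions of an NHWC tensor.
#
#   x = tf.random.normal([8, 7, 7, 2048])
#   pooled = reduce_mean(x, keepdims=False, data_format='NHWC')  # -> shape [8, 2048]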
|
Tools/DGLPyTorch/SyntheticGraphGeneration/configurations | configurations | ogbn_mag | {
"nodes": [
{
"name": "paper",
"count": 736389,
"features": [
{
"name": "feat_0",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_1",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_2",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_3",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_4",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_5",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_6",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_7",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_8",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_9",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_10",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_11",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_12",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_13",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_14",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_15",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_16",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_17",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_18",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_19",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_20",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_21",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_22",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_23",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_24",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_25",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_26",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_27",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_28",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_29",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_30",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_31",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_32",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_33",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_34",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_35",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_36",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_37",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_38",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_39",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_40",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_41",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_42",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_43",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_44",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_45",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_46",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_47",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_48",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_49",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_50",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_51",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_52",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_53",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_54",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_55",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_56",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_57",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_58",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_59",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_60",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_61",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_62",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_63",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_64",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_65",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_66",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_67",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_68",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_69",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_70",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_71",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_72",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_73",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_74",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_75",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_76",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_77",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_78",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_79",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_80",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_81",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_82",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_83",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_84",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_85",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_86",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_87",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_88",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_89",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_90",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_91",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_92",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_93",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_94",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_95",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_96",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_97",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_98",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_99",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_100",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_101",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_102",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_103",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_104",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_105",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_106",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_107",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_108",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_109",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_110",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_111",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_112",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_113",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_114",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_115",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_116",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_117",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_118",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_119",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_120",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_121",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_122",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_123",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_124",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_125",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_126",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_127",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "year",
"dtype": "int32",
"feature_type": "categorical"
},
{
"name": "venue",
"dtype": "int32",
"feature_type": "categorical"
}
],
"features_path": "paper.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "paper"
},
"params": {
"gpu": true
}
}
]
},
{
"name": "author",
"count": 1134649,
"features": [
{
"name": "feat_0",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_1",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_2",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_3",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_4",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_5",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_6",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_7",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_8",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_9",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_10",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_11",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_12",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_13",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_14",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_15",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_16",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_17",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_18",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_19",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_20",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_21",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_22",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_23",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_24",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_25",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_26",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_27",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_28",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_29",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_30",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_31",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_32",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_33",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_34",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_35",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_36",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_37",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_38",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_39",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_40",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_41",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_42",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_43",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_44",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_45",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_46",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_47",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_48",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_49",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_50",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_51",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_52",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_53",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_54",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_55",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_56",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_57",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_58",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_59",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_60",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_61",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_62",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_63",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_64",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_65",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_66",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_67",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_68",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_69",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_70",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_71",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_72",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_73",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_74",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_75",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_76",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_77",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_78",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_79",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_80",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_81",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_82",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_83",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_84",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_85",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_86",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_87",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_88",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_89",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_90",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_91",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_92",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_93",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_94",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_95",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_96",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_97",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_98",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_99",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_100",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_101",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_102",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_103",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_104",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_105",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_106",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_107",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_108",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_109",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_110",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_111",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_112",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_113",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_114",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_115",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_116",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_117",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_118",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_119",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_120",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_121",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_122",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_123",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_124",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_125",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_126",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_127",
"dtype": "float32",
"feature_type": "continuous"
}
],
"features_path": "author.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "author"
},
"params": {
"gpu": true
}
}
]
},
{
"name": "institution",
"count": 8740,
"features": [
{
"name": "feat_0",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_1",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_2",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_3",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_4",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_5",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_6",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_7",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_8",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_9",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_10",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_11",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_12",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_13",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_14",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_15",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_16",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_17",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_18",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_19",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_20",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_21",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_22",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_23",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_24",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_25",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_26",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_27",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_28",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_29",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_30",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_31",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_32",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_33",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_34",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_35",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_36",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_37",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_38",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_39",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_40",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_41",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_42",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_43",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_44",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_45",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_46",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_47",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_48",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_49",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_50",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_51",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_52",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_53",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_54",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_55",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_56",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_57",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_58",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_59",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_60",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_61",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_62",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_63",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_64",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_65",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_66",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_67",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_68",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_69",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_70",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_71",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_72",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_73",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_74",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_75",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_76",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_77",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_78",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_79",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_80",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_81",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_82",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_83",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_84",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_85",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_86",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_87",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_88",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_89",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_90",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_91",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_92",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_93",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_94",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_95",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_96",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_97",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_98",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_99",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_100",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_101",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_102",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_103",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_104",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_105",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_106",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_107",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_108",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_109",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_110",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_111",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_112",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_113",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_114",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_115",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_116",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_117",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_118",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_119",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_120",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_121",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_122",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_123",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_124",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_125",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_126",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_127",
"dtype": "float32",
"feature_type": "continuous"
}
],
"features_path": "institution.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "institution"
},
"params": {
"gpu": true
}
}
]
},
{
"name": "field_of_study",
"count": 59965,
"features": [
{
"name": "feat_0",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_1",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_2",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_3",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_4",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_5",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_6",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_7",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_8",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_9",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_10",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_11",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_12",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_13",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_14",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_15",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_16",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_17",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_18",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_19",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_20",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_21",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_22",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_23",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_24",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_25",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_26",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_27",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_28",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_29",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_30",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_31",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_32",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_33",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_34",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_35",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_36",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_37",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_38",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_39",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_40",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_41",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_42",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_43",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_44",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_45",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_46",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_47",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_48",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_49",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_50",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_51",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_52",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_53",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_54",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_55",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_56",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_57",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_58",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_59",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_60",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_61",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_62",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_63",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_64",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_65",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_66",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_67",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_68",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_69",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_70",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_71",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_72",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_73",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_74",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_75",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_76",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_77",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_78",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_79",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_80",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_81",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_82",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_83",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_84",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_85",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_86",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_87",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_88",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_89",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_90",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_91",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_92",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_93",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_94",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_95",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_96",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_97",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_98",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_99",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_100",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_101",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_102",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_103",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_104",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_105",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_106",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_107",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_108",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_109",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_110",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_111",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_112",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_113",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_114",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_115",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_116",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_117",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_118",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_119",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_120",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_121",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_122",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_123",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_124",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_125",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_126",
"dtype": "float32",
"feature_type": "continuous"
},
{
"name": "feat_127",
"dtype": "float32",
"feature_type": "continuous"
}
],
"features_path": "field_of_study.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "field_of_study"
},
"params": {
"gpu": true
}
}
]
}
],
"edges": [
{
"name": "affiliated_with",
"count": 1043998,
"src_node_type": "author",
"dst_node_type": "institution",
"directed": false,
"features": [
{
"name": "feat",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "affiliated_with_features.parquet",
"structure_path": "affiliated_with_list.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "affiliated_with"
},
"params": {
"gpu": true
}
}
],
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "affiliated_with"
},
"params": {
"seed": 42,
"gpu": true
}
}
},
{
"name": "writes",
"count": 7145660,
"src_node_type": "author",
"dst_node_type": "paper",
"directed": false,
"features": [
{
"name": "feat",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "writes_features.parquet",
"structure_path": "writes_list.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "writes"
},
"params": {
"gpu": true
}
}
],
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "writes"
},
"params": {
"seed": 42,
"gpu": true
}
}
},
{
"name": "cites",
"count": 5416271,
"src_node_type": "paper",
"dst_node_type": "paper",
"directed": false,
"features": [
{
"name": "feat",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "cites_features.parquet",
"structure_path": "cites_list.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "cites"
},
"params": {
"gpu": true
}
}
],
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "cites"
},
"params": {
"seed": 42,
"gpu": true
}
}
},
{
"name": "has_topic",
"count": 7505078,
"src_node_type": "paper",
"dst_node_type": "field_of_study",
"directed": false,
"features": [
{
"name": "feat",
"dtype": "int64",
"feature_type": "categorical"
}
],
"features_path": "has_topic_features.parquet",
"structure_path": "has_topic_list.parquet",
"[gen]tabular_generators": [
{
"type": "kde",
"features_list": -1,
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "has_topic"
},
"params": {
"gpu": true
}
}
],
"[gen]structure_generator": {
"type": "RMAT",
"data_source": {
"type": "cfg",
"path": "/workspace/data/ogbn_mag/syngen_preprocessed",
"name": "has_topic"
},
"params": {
"seed": 42,
"gpu": true
}
}
}
]
} |
PyTorch/Classification/GPUNet/triton | triton | requirements | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
model_navigator[pyt] @ git+https://github.com/triton-inference-server/[email protected]#egg=model_navigator
natsort>=7.0.0
networkx==2.5
pycuda>=2019.1.2
PyYAML>=5.2
tabulate>=0.8.7
tqdm>=4.44.1
|
CUDA-Optimized/FastSpeech/fastspeech/utils | utils | optimizer | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
class ScheduledOptim():
''' A simple wrapper class for learning rate scheduling '''
def __init__(self, optimizer, d_model, n_warmup_steps, current_steps):
self._optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = current_steps
self.init_lr = np.power(d_model, -0.5)
def step_and_update_lr_frozen(self, learning_rate_frozen):
for param_group in self._optimizer.param_groups:
param_group['lr'] = learning_rate_frozen
self._optimizer.step()
def step_and_update_lr(self):
self._update_learning_rate()
self._optimizer.step()
def get_learning_rate(self):
learning_rate = 0.0
for param_group in self._optimizer.param_groups:
learning_rate = param_group['lr']
return learning_rate
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def _get_lr_scale(self):
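# Noam schedule from "Attention Is All You Need":
# scale = min(step^-0.5, step * n_warmup_steps^-1.5), i.e. a linear warmup
# for n_warmup_steps followed by an inverse-square-root decay; the effective
# learning rate is init_lr (= d_model^-0.5) times this scale.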
return np.min([
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_current_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
|
PyTorch/Forecasting/TFT | TFT | configuration | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_utils import InputTypes, DataTypes, FeatureSpec
import datetime
class ElectricityConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('power_usage', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('hour', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'days_from_start' # This column contains time indices across which we split the data
self.train_range = (1096, 1315)
self.valid_range = (1308, 1339)
self.test_range = (1332, 1346)
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = True
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [369]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
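# Hourly data: each example spans 8 days (192 steps); the first 7 days (168 steps)
# form the encoder (history) window and the remaining 24 steps are the prediction horizon.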
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.1
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
class TrafficConfig():
def __init__(self):
self.features = [
FeatureSpec('id', InputTypes.ID, DataTypes.CATEGORICAL),
FeatureSpec('hours_from_start', InputTypes.TIME, DataTypes.CONTINUOUS),
FeatureSpec('values', InputTypes.TARGET, DataTypes.CONTINUOUS),
FeatureSpec('time_on_day', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('day_of_week', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('hours_from_start', InputTypes.KNOWN, DataTypes.CONTINUOUS),
FeatureSpec('categorical_id', InputTypes.STATIC, DataTypes.CATEGORICAL),
]
# Dataset split boundaries
self.time_ids = 'sensor_day' # This column contains time indices across which we split the data
self.train_range = (0, 151)
self.valid_range = (144, 166)
self.test_range = (159, float('inf'))
self.dataset_stride = 1 #how many timesteps between examples
self.scale_per_id = False
self.missing_id_strategy = None
self.missing_cat_data_strategy='encode_all'
# Feature sizes
self.static_categorical_inp_lens = [963]
self.temporal_known_categorical_inp_lens = []
self.temporal_observed_categorical_inp_lens = []
self.quantiles = [0.1, 0.5, 0.9]
self.example_length = 8 * 24
self.encoder_length = 7 * 24
self.n_head = 4
self.hidden_size = 128
self.dropout = 0.3
self.attn_dropout = 0.0
#### Derived variables ####
self.temporal_known_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.KNOWN and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_observed_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.OBSERVED and x.feature_embed_type == DataTypes.CONTINUOUS])
self.temporal_target_size = len([x for x in self.features if x.feature_type == InputTypes.TARGET])
self.static_continuous_inp_size = len([x for x in self.features
if x.feature_type == InputTypes.STATIC and x.feature_embed_type == DataTypes.CONTINUOUS])
self.num_static_vars = self.static_continuous_inp_size + len(self.static_categorical_inp_lens)
self.num_future_vars = self.temporal_known_continuous_inp_size + len(self.temporal_known_categorical_inp_lens)
self.num_historic_vars = sum([self.num_future_vars,
self.temporal_observed_continuous_inp_size,
self.temporal_target_size,
len(self.temporal_observed_categorical_inp_lens),
])
CONFIGS = {'electricity': ElectricityConfig,
'traffic': TrafficConfig,
}
|
PyTorch/SpeechSynthesis/FastPitch/platform | platform | DGX1_FastPitch_FP32_1GPU | #!/bin/bash
set -a
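# 'set -a' exports every variable assigned below; ': ${VAR:=default}' only assigns
# the default when the variable is not already set in the environment.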
: ${NUM_GPUS:=1}
: ${BATCH_SIZE:=16}
: ${GRAD_ACCUMULATION:=16}
: ${AMP:=false}
bash scripts/train.sh "$@"
|
PyTorch/Translation/Transformer/fairseq/modules | modules | sinusoidal_positional_embedding | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from typing import Optional, Dict
import torch
import torch.nn as nn
from torch import Tensor
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
def __init__(self, embedding_dim, padding_idx, left_pad, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.left_pad = left_pad
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
# JIT compliance
self.register_buffer(
'positions_buffer', torch.arange(padding_idx + 1, init_size + padding_idx + 1))
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: int):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
emb[padding_idx] = torch.zeros(emb.shape[1]) # emb[padding_idx, :] = 0
return emb
def forward(self, input: Tensor, incremental_state: Optional[Dict[str, Dict[str, Tensor]]]=None):
"""Input is expected to be of size [bsz x seqlen]."""
# recompute/expand embeddings if needed
bsz, seq_len = input.size()
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.type_as(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
return self.weights[self.padding_idx + seq_len, :].expand(bsz, 1, -1)
#### JIT ####
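# Build position ids for non-padding tokens (values start at padding_idx + 1);
# padding tokens keep padding_idx, whose embedding row is all zeros.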
mask = input.ne(self.padding_idx)
positions = self.positions_buffer[:input.size(1)].expand_as(input)
if self.left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
positions = input.clone().masked_scatter_(mask, positions[mask])
#############
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP32_1GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT_DIR=${1:-"/results/SSD320_FP32_1GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_full_1gpus.config"
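# TENSOR_OPS=0 keeps cuBLAS/cuDNN math in full FP32 for this FP32 configuration
# (Tensor Core math paths for FP32 operations are disabled via the flags below).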
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
time python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
--alsologtostderr \
"${@:3}"
|
Tools/PyTorch/TimeSeriesPredictionPlatform/evaluators | evaluators | evaluator | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from abc import ABC
import dgl
import numpy as np
import torch
from data.datasets import get_collate_fn
from distributed_utils import get_mp_context
from torch.utils.data import DataLoader
from training.utils import to_device
from .evaluation_metrics import METRICS
import pandas as pd
class MetricEvaluator(ABC):
def __init__(self, config):
self.output_selector = config.get("output_selector", None)
self.metrics = []
preprocessor_state = pickle.load(open(config.preprocessor_state_path, "rb"))
self.scalers = preprocessor_state["scalers"]
self.save_predictions = config.get("save_predictions", False)
self.example_history = []
for name in config.metrics:
if name not in METRICS:
raise ValueError(f"No metric of name: {name}")
self.metrics.append(METRICS[name]())
self.config = config
def predict(self, *args, **kwargs):
raise NotImplementedError
def save_preds(self, preds, ids):
all_examples = self.example_history
all_examples = all_examples.transpose(2,0,1).reshape(-1, all_examples.shape[1])
if len(preds.shape) == 4:
tgt_ords = np.arange(preds.shape[2]).repeat(preds.shape[0])
tgt_ords = pd.DataFrame(tgt_ords, columns=['#target'])
preds = preds.transpose(2,0,1,3).reshape(-1,preds.shape[1], preds.shape[3])
ids = ids.transpose().reshape(-1)
else:
tgt_ords = None
all_examples = self.scalers.inverse_transform_targets(all_examples, ids)
hist_df = pd.DataFrame(all_examples, columns=[f't{i+1}' for i in range(-self.config.encoder_length, 0)])
ids = pd.DataFrame(ids, columns=['id'])
col_labels = [f'Estimator{j}_t{i:+}' for j in range(preds.shape[2]) for i in range(preds.shape[1])]
preds_df = pd.DataFrame(preds.reshape(preds.shape[0],-1, order='F'), columns=col_labels)
df = pd.concat([ids, tgt_ords, hist_df, preds_df], axis=1)
df.to_csv('predictions.csv')
def evaluate(self, preds, labels, ids, weights):
results = {}
# In multi target case we treat each target as a separate example.
# Then we can reduce it to a single target case setting BS = prev_BS * num_targets
if len(preds.shape) == 4:
if self.scalers.scale_per_id:
ids = np.arange(preds.shape[-2])
ids = np.repeat(ids, preds.shape[0])
else:
ids = None
# TODO: this causes a memory movement. Rewrite this with views!
preds = np.concatenate([preds[:, :, i] for i in range(preds.shape[-2])], axis=0)
labels = np.concatenate([labels[:, :, i] for i in range(labels.shape[-1])], axis=0)
weights = np.concatenate([weights[:, :, i] for i in range(weights.shape[-1])], axis=0)
elif len(preds.shape) == 3:
labels = labels.squeeze(-1)
if weights.size:
weights = weights.squeeze(-1)
else:
raise ValueError("Expected shape of predictions is either BSxTxFxH or BSxTxH")
upreds = np.stack([self.scalers.inverse_transform_targets(preds[..., i], ids) for i in range(preds.shape[-1])],
axis=-1)
labels = self.scalers.inverse_transform_targets(labels, ids)
if self.save_predictions:
self.save_preds(upreds, ids)
for metric in self.metrics:
selector = getattr(metric, 'selector', self.output_selector)
preds = upreds[..., selector]
results[metric.name] = metric(preds, labels, weights) if np.all(np.isfinite(preds)) else np.NaN
results = {k: float(v) for k, v in results.items()}
return results
class CTLMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.device = config.device
if test_data is not None:
mp_context = get_mp_context()
self.dataloader = DataLoader(
test_data,
batch_size=self.config.batch_size,
num_workers=1,
pin_memory=True,
collate_fn=get_collate_fn(config.model_type, config.encoder_length, test=True),
multiprocessing_context=mp_context
)
else:
self.dataloader = None
def prep_data(self, batch):
ids = batch.ndata['id'] if isinstance(batch, dgl.DGLGraph) else batch["id"]
ids = ids[:, 0, ...] # Shape BS x T x F [x H]
weights = batch.ndata['weight'] if isinstance(batch, dgl.DGLGraph) else batch['weight']
weights = weights[:, self.config.encoder_length:,
:] if weights is not None and weights.numel() else torch.empty(0)
batch = to_device(batch, device=self.device)
return batch, weights, ids
def predict(self, model, dataloader=None):
if not dataloader:
dataloader = self.dataloader
assert dataloader is not None, "Dataloader cannot be None, either pass in a valid dataloader or \
initialize evaluator with valid test_data"
test_method_name = 'predict' if hasattr(model, "predict") else '__call__'
test_method = getattr(model, test_method_name)
model.eval()
with torch.no_grad():
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for i, (batch, labels, _) in enumerate(dataloader):
if self.save_predictions:
self.example_history.append(batch['target'][:,:self.config.encoder_length].detach().cpu())
batch, weights, ids = self.prep_data(batch)
labels_full.append(labels)
weights_full.append(weights)
preds = test_method(batch)
ids_full.append(ids)
preds_full.append(preds)
preds_full = torch.cat(preds_full, dim=0).cpu().numpy()
labels_full = torch.cat(labels_full, dim=0).cpu().numpy()
weights_full = torch.cat(weights_full).cpu().numpy()
ids_full = torch.cat(ids_full).cpu().numpy()
if self.save_predictions:
self.example_history = torch.cat(self.example_history, dim=0).cpu().numpy()
return preds_full, labels_full, ids_full, weights_full
class StatMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.dataloader = test_data
def predict(self, model, dataloader=None):
dataloader = dataloader or self.dataloader
assert dataloader, "Test dataloader not provided"
preds_full = []
labels_full = []
weights_full = []
ids_full = []
for i, test_batch in enumerate(dataloader):
labels = test_batch["endog"]
ids = test_batch["id"].iloc[0]
preds = np.array(model.predict(test_batch["exog"], i))
labels_full.append(labels)
weights_full.append(test_batch.get('weight', []))
ids_full.append(ids)
preds_full.append(preds)
preds_full = np.stack(preds_full)
labels_full = np.stack(labels_full)
weights_full = np.stack(weights_full)
ids_full = np.stack(ids_full)
if len(preds_full.shape) == 2:
preds_full = preds_full[:, :, np.newaxis]
return preds_full, labels_full, ids_full, weights_full
class XGBMetricEvaluator(MetricEvaluator):
def __init__(self, test_data, config):
super().__init__(config)
self.dataloader = test_data
def predict(self, model, dataloader=None):
dataloader = dataloader or self.dataloader
assert dataloader, "Test dataloader not provided"
out = []
labels = []
ids = []
weights = []
for i, (test_step, test_label) in enumerate(dataloader):
labels.append(test_label.to_numpy())
ids.append(test_step['_id_'].to_numpy())
outt = model.predict(test_step, i)
weights.append([])
out.append(outt)
outtemp = np.vstack(out).transpose()
labels_temp = np.hstack(labels)
ids_temp = np.vstack(ids).transpose()[:, 0]
if len(outtemp.shape) == 2:
outtemp = outtemp[:, :, np.newaxis]
if len(labels_temp.shape) == 2:
labels_temp = labels_temp[:, :, np.newaxis]
if self.save_predictions:
labels_ids = self.dataloader.data[['_id_', self.dataloader.target[0]]]
for n, g in labels_ids.groupby("_id_"):
labels_all = g[self.dataloader.target[0]].to_numpy().round(6)
windows_labels = np.lib.stride_tricks.sliding_window_view(labels_all, self.dataloader.example_length)
self.example_history.append(windows_labels.copy()[:, :self.dataloader.encoder_length])
self.example_history = np.concatenate(self.example_history, axis=0)[:, :, np.newaxis]
return outtemp, labels_temp, ids_temp, np.stack(weights)
|
PyTorch/Classification/ConvNets | ConvNets | multiproc | # From PyTorch:
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import subprocess
import os
import socket
import time
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes"
)
# Optional arguments for the launch helper
parser.add_argument(
"--nnodes",
type=int,
default=1,
help="The number of nodes to use for distributed " "training",
)
parser.add_argument(
"--node_rank",
type=int,
default=0,
help="The rank of the node for multi-node distributed " "training",
)
parser.add_argument(
"--nproc_per_node",
type=int,
default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.",
)
parser.add_argument(
"--master_addr",
default="127.0.0.1",
type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1",
)
parser.add_argument(
"--master_port",
default=29500,
type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
cmd = [sys.executable, "-u", args.training_script] + args.training_script_args
print(cmd)
stdout = (
None if local_rank == 0 else open("GPU_" + str(local_rank) + ".log", "w")
)
process = subprocess.Popen(cmd, env=current_env, stdout=stdout, stderr=stdout)
processes.append(process)
try:
up = True
error = False
while up and not error:
up = False
for p in processes:
ret = p.poll()
if ret is None:
up = True
elif ret != 0:
error = True
time.sleep(1)
if error:
for p in processes:
if p.poll() is None:
p.terminate()
exit(1)
except KeyboardInterrupt:
for p in processes:
p.terminate()
raise
except SystemExit:
for p in processes:
p.terminate()
raise
except:
for p in processes:
p.terminate()
raise
if __name__ == "__main__":
main()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | tacotron2Loader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "tacotron2Loader.h"
#include "encoderInstance.h"
#include "engineCache.h"
#include "tacotron2Builder.h"
#include "trtUtils.h"
#include "utils.h"
#include "NvInfer.h"
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
std::shared_ptr<Tacotron2Instance> Tacotron2Loader::load(EngineCache& cache, IBuilder& builder,
const std::string& filename, const int inputLength, const bool fp16, const int batchSize)
{
std::vector<TRTPtr<ICudaEngine>> engines;
if (Utils::hasExtension(filename, ".pt")
|| Utils::hasExtension(filename, ".json")) {
Tacotron2Builder tacotron2Builder(filename);
engines = tacotron2Builder.build(inputLength, builder, batchSize, fp16);
// save generated engine
const std::string engFilename(
filename + "_" + std::to_string(inputLength) + ".eng");
cache.save(engines, engFilename);
}
else if (Utils::hasExtension(filename, ".eng"))
{
engines = cache.loadComposite(filename);
for (size_t i = 0; i < engines.size(); ++i)
{
const TRTPtr<ICudaEngine>& engine = engines[i];
// make sure all engines except the plugin engine can support the
// batch size, or if we don't have both a plain and plugin engine,
// make sure the batch size is supported
if (!(engines.size() == 4 && i == 2)
&& engine->getMaxBatchSize() < batchSize) {
throw std::runtime_error(
"Engine " + filename + ":" + std::to_string(i)
+ " does not support "
" the requested batch size: "
+ std::to_string(engine->getMaxBatchSize()) + " / "
+ std::to_string(batchSize)
+ ". "
"Rebuild the engine with the larger batch size.");
}
const int maxLen = TRTUtils::getBindingSize(*engines[0], EncoderInstance::INPUT_NAME);
if (inputLength > maxLen)
{
throw std::runtime_error(
"Engine " + filename
+ " is built for a "
"maximum input length of "
+ std::to_string(maxLen) + " but " + std::to_string(inputLength)
+ " is requested. Rebuild the engine "
"with the larger input size.");
}
}
}
else
{
throw std::runtime_error("Unknown model file type: " + filename);
}
if (engines.size() != 4)
{
throw std::runtime_error(
"Invalid engine file, contains " + std::to_string(engines.size()) + " engines, but expected 4.");
}
return std::make_shared<Tacotron2Instance>(
std::move(engines[0]), std::move(engines[1]), std::move(engines[2]), std::move(engines[3]));
}
} // namespace tts
|
TensorFlow/Recommendation/VAE-CF | VAE-CF | prepare_dataset | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from vae.load.preprocessing import load_and_parse_ML_20M
import numpy as np
parser = ArgumentParser(description="Prepare data for VAE training")
parser.add_argument('--data_dir', default='/data', type=str,
help='Directory for storing the training data')
parser.add_argument('--seed', default=0, type=int,
help='Random seed')
args = parser.parse_args()
print('Preprocessing seed: ', args.seed)
np.random.seed(args.seed)
# load dataset
(train_data,
validation_data_input,
validation_data_true,
test_data_input,
test_data_true) = load_and_parse_ML_20M(args.data_dir)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | SparseAdam | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
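# Hydra-style config: `_target_` names the class to instantiate (torch.optim.SparseAdam)
# with the keyword arguments below.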
_target_: torch.optim.SparseAdam
lr: 0.001
betas: [0.9, 0.999]
eps: 1e-8
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment | deployment | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
|
TensorFlow2/Detection | Detection | README | # Object Detection
A natural progression from image classification would be classification and localization of the subject of the image. We can take this idea one step further and localize objects in a given image. Simply put, object detection refers to identifying which object(s) are there in an image.

Source: [Joseph Redmon, Ali Farhadi, “YOLO9000: Better, Faster, Stronger”](https://arxiv.org/abs/1612.08242)
## Introduction to Object Detection
In this section we will try to answer the following questions:
- What is object detection?
- Why is object detection important?
Object Detection is about not only detecting the presence and location of objects in images and videos, but also categorizing them into everyday classes. Image Classification and Object Detection are often confused. Simply put, the difference between them is the same as the difference between saying “This is a cat” and pointing to a cat and saying “There is the cat”.
To build autonomous systems, perception is the main challenge to be solved. Perception, in the context of autonomous systems, refers to the ability to understand the surroundings of the autonomous agent. This means that the agent needs to be able to figure out where and what objects are in its immediate vicinity.
Object detection can help keep humans away from toxic environments and hazardous situations. Tasks like garbage segregation, oil rig monitoring, nightly surveillance, and cargo port maintenance, along with other high-risk applications, can be aided by robots or cameras that can detect objects. Essentially, in any environment that requires visual inspection or analysis but is too dangerous for humans, object detection pipelines can shield people from on-site hazards.
## How does it work?
While this has been a topic of research since before Deep Learning became mainstream, the best performing models today use one or more Deep Neural Networks.
Many architectures use networks pretrained on a different, simpler task, like Image Classification. The inputs to this task are images or videos, and the outputs are usually a set of bounding box coordinates that enclose each detected object, along with a class label for each of them. With advances in research and the use of GPUs, it is now possible to run object detection in real time with impressive accuracy!
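The exact output format varies between frameworks and models, but most detection APIs return, for every image, a list of boxes, each paired with a class label and a confidence score. The sketch below is a minimal, framework-agnostic illustration of that output structure; the class names, coordinates, and threshold are made-up values for the example.

```python
# Minimal sketch of a typical detection result (illustrative values only):
# each detected object carries a bounding box in (x_min, y_min, x_max, y_max)
# pixel coordinates, a class label, and a confidence score.
detections = [
    {"box": (48, 120, 310, 420), "label": "dog", "score": 0.94},
    {"box": (400, 80, 640, 360), "label": "bicycle", "score": 0.81},
    {"box": (10, 10, 60, 45), "label": "bird", "score": 0.32},
]

# Downstream code usually filters by a confidence threshold before drawing
# boxes or handing the detections to a tracking / planning module.
CONFIDENCE_THRESHOLD = 0.5
kept = [d for d in detections if d["score"] >= CONFIDENCE_THRESHOLD]
for d in kept:
    print(f"{d['label']}: score={d['score']:.2f}, box={d['box']}")
```

In practice, the boxes come from a trained detection network, and the confidence threshold is tuned per application.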
This Collection contains models and containers for object detection achieving state-of-the-art accuracies, tested and maintained by NVIDIA.
## Applications and Use cases
### Autonomous Vehicles
Autonomous vehicles need to perceive and interact with real-world objects in order to blend in with the environment. For instance, a self-driving car needs to detect other vehicles, pedestrians, objects and obstacles on the road, and traffic signals, and it must also understand the exact location of these objects. This perception information helps the agent avoid obstacles and understand how to interact with objects like traffic lights.
### Warehouses
Warehouses have many conveyor belts and segregation platforms, and the sorting tasks on them have traditionally been handled manually. As factories and warehouses grow, manual sorting and inventory management cannot scale proportionally. Object detection pipelines deployed on robots can reduce operational friction and enable businesses to scale up easily.
### Surveillance
Surveillance systems typically accumulate large volumes of video data that need to be analyzed for all sorts of anomalies. Given the number of video sources even a small store has, analyzing surveillance data from a large operation is a challenge. Object detection networks can help automate much of the pipeline by highlighting sections where there is an object of interest. They can also be trained to identify anomalies in video streams.
### Hazardous tasks
Humans work at waste processing plants, nuclear power plants, oil rigs, and around heavy machinery, all of which tend to be extremely hazardous and pose health risks. These jobs essentially require human presence for visual inspection and confirmation, which revolve around recognizing objects and relaying their locations. Risky tasks like these can be completed with the help of an object detection pipeline deployed on a camera or a robot, reducing operational risks and costs. |
PyTorch/SpeechRecognition/QuartzNet/platform | platform | DGX2_QuartzNet_AMP_16GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=16}
: ${GPU_BATCH_SIZE:=36}
: ${GRAD_ACCUMULATION:=2}
: ${AMP:=true}
bash scripts/train.sh "$@"
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics | metrics | oid_vrd_challenge_evaluation | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs evaluation using OpenImages groundtruth and predictions.
Example usage:
python \
models/research/object_detection/metrics/oid_vrd_challenge_evaluation.py \
--input_annotations_boxes=/path/to/input/annotations-human-bbox.csv \
--input_annotations_labels=/path/to/input/annotations-label.csv \
--input_class_labelmap=/path/to/input/class_labelmap.pbtxt \
--input_relationship_labelmap=/path/to/input/relationship_labelmap.pbtxt \
--input_predictions=/path/to/input/predictions.csv \
--output_metrics=/path/to/output/metric.csv \
CSVs with bounding box annotations and image labels (including the image URLs)
can be downloaded from the Open Images Challenge website:
https://storage.googleapis.com/openimages/web/challenge.html
The format of the input CSVs and the metrics themselves are described on the
challenge website.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pandas as pd
from google.protobuf import text_format
from object_detection.metrics import io_utils
from object_detection.metrics import oid_vrd_challenge_evaluation_utils as utils
from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import vrd_evaluation
def _load_labelmap(labelmap_path):
"""Loads labelmap from the labelmap path.
Args:
labelmap_path: Path to the labelmap.
Returns:
A dictionary mapping class name to class numerical id.
"""
label_map = string_int_label_map_pb2.StringIntLabelMap()
with open(labelmap_path, 'r') as fid:
label_map_string = fid.read()
text_format.Merge(label_map_string, label_map)
labelmap_dict = {}
for item in label_map.item:
labelmap_dict[item.name] = item.id
return labelmap_dict
def _swap_labelmap_dict(labelmap_dict):
"""Swaps keys and labels in labelmap.
Args:
labelmap_dict: Input dictionary.
Returns:
A dictionary mapping class numerical id to class name.
"""
return dict((v, k) for k, v in labelmap_dict.items())
def main(parsed_args):
all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
all_annotations = pd.concat([all_box_annotations, all_label_annotations])
class_label_map = _load_labelmap(parsed_args.input_class_labelmap)
relationship_label_map = _load_labelmap(
parsed_args.input_relationship_labelmap)
relation_evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
phrase_evaluator = vrd_evaluation.VRDPhraseDetectionEvaluator()
for _, groundtruth in enumerate(all_annotations.groupby('ImageID')):
image_id, image_groundtruth = groundtruth
groundtruth_dictionary = utils.build_groundtruth_vrd_dictionary(
image_groundtruth, class_label_map, relationship_label_map)
relation_evaluator.add_single_ground_truth_image_info(
image_id, groundtruth_dictionary)
phrase_evaluator.add_single_ground_truth_image_info(image_id,
groundtruth_dictionary)
all_predictions = pd.read_csv(parsed_args.input_predictions)
for _, prediction_data in enumerate(all_predictions.groupby('ImageID')):
image_id, image_predictions = prediction_data
prediction_dictionary = utils.build_predictions_vrd_dictionary(
image_predictions, class_label_map, relationship_label_map)
relation_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
phrase_evaluator.add_single_detected_image_info(image_id,
prediction_dictionary)
relation_metrics = relation_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
phrase_metrics = phrase_evaluator.evaluate(
relationships=_swap_labelmap_dict(relationship_label_map))
with open(parsed_args.output_metrics, 'w') as fid:
io_utils.write_csv(fid, relation_metrics)
io_utils.write_csv(fid, phrase_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Evaluate Open Images Visual Relationship Detection predictions.')
parser.add_argument(
'--input_annotations_boxes',
required=True,
help='File with groundtruth vrd annotations.')
parser.add_argument(
'--input_annotations_labels',
required=True,
help='File with groundtruth labels annotations')
parser.add_argument(
'--input_predictions',
required=True,
help="""File with detection predictions; NOTE: no postprocessing is
applied in the evaluation script.""")
parser.add_argument(
'--input_class_labelmap',
required=True,
help="""OpenImages Challenge labelmap; note: it is expected to include
attributes.""")
parser.add_argument(
'--input_relationship_labelmap',
required=True,
help="""OpenImages Challenge relationship labelmap.""")
parser.add_argument(
'--output_metrics', required=True, help='Output file with csv metrics')
args = parser.parse_args()
main(args)
|
PyTorch/SpeechSynthesis/Tacotron2/platform | platform | DGX1_waveglow_FP32_4NGPU_train | mkdir -p output
python -m multiproc train.py -m WaveGlow -o output/ -lr 1e-4 --epochs 1001 -bs 4 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_mobilenet_v2_fpn_feature_extractor_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor."""
import numpy as np
import tensorflow as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor
slim = tf.contrib.slim
class SsdMobilenetV2FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (ssd_mobilenet_v2_fpn_feature_extractor.
SSDMobileNetV2FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_320(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
def test_fused_batchnorm(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any(op.type == 'FusedBatchNorm'
for op in tf.get_default_graph().get_operations()))
def test_get_expected_feature_map_variable_names(self):
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_maps_variables = set([
# Mobilenet V2 feature maps
'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
'MobilenetV2/Conv_1/weights',
# FPN layers
'MobilenetV2/fpn/bottom_up_Conv2d_20/weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/weights',
'MobilenetV2/fpn/smoothing_1/weights',
'MobilenetV2/fpn/smoothing_2/weights',
'MobilenetV2/fpn/projection_1/weights',
'MobilenetV2/fpn/projection_2/weights',
'MobilenetV2/fpn/projection_3/weights',
])
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple)
feature_extractor.extract_features(preprocessed_inputs)
actual_variable_set = set([
var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
])
variable_intersection = expected_feature_maps_variables.intersection(
actual_variable_set)
self.assertSetEqual(expected_feature_maps_variables,
variable_intersection)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync | # SSD with Mobilenet v1 with quantized training.
# Trained on COCO, initialized from Imagenet classification checkpoint
# Achieves 18.2 mAP on coco14 minival dataset.
# This config is TPU compatible
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: false
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.2
max_scale: 0.95
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
}
}
image_resizer {
fixed_shape_resizer {
height: 300
width: 300
}
}
box_predictor {
convolutional_box_predictor {
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.8
kernel_size: 1
box_code_size: 4
apply_sigmoid_to_scores: false
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
center: true,
decay: 0.97,
epsilon: 0.001,
}
}
}
}
feature_extractor {
type: 'ssd_mobilenet_v1'
min_depth: 16
depth_multiplier: 0.75
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
center: true,
decay: 0.97,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.75,
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
batch_size: 128
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 50000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .2
total_steps: 50000
warmup_learning_rate: 0.06
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
graph_rewriter {
quantization {
delay: 48000
activation_bits: 8
weight_bits: 8
}
}
|
PyTorch/Detection/Efficientdet/effdet/csrc/focal_loss | focal_loss | focal_loss_cuda_kernel | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
thread_local int multiProcessorCount=0;
#define ASSERT_UINT4_ALIGNED(PTR) \
AT_ASSERTM(is_aligned<uint4>(PTR), "Tensor " #PTR " is not uint4 aligned")
template <class T> bool is_aligned(const void *ptr) noexcept {
auto iptr = reinterpret_cast<std::uintptr_t>(ptr);
return !(iptr % alignof(T));
}
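// Note (added): the forward kernel below evaluates the focal loss
//   FL(x, y) = -alpha_t * (1 - p_t)^gamma * log(p_t)
// where p_t = sigmoid(x) for positive matches and 1 - sigmoid(x) for negative
// matches, and alpha_t = alpha or (1 - alpha) respectively. log(p_t) is
// computed through a numerically stable softplus (off_a below), and the
// optional label-smoothing terms are folded into the same expression.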
template <bool SMOOTHING, int ILP, typename scalar_t, typename labelscalar_t,
typename accscalar_t, typename outscalar_t>
__global__ void focal_loss_forward_cuda_kernel(
outscalar_t *loss, scalar_t *partial_grad,
const scalar_t *__restrict__ cls_output,
const labelscalar_t *__restrict__ cls_targets_at_level,
const float *__restrict__ num_positives_sum, const int64_t num_examples,
const int64_t num_classes, const int64_t num_real_classes,
const float alpha, const float gamma, const float smoothing_factor) {
extern __shared__ unsigned char shm[];
accscalar_t *loss_shm = reinterpret_cast<accscalar_t *>(shm);
loss_shm[threadIdx.x] = 0;
accscalar_t loss_acc = 0;
accscalar_t one = accscalar_t(1.0);
accscalar_t K = accscalar_t(2.0);
accscalar_t normalizer = one / static_cast<accscalar_t>(num_positives_sum[0]);
accscalar_t nn_norm, np_norm, pn_norm, pp_norm;
// *_norm is used for label smoothing only
if (SMOOTHING) {
nn_norm = one - smoothing_factor / K;
np_norm = smoothing_factor / K;
pn_norm = smoothing_factor - smoothing_factor / K;
pp_norm = one - smoothing_factor + smoothing_factor / K;
}
uint4 p_vec, grad_vec;
// Accumulate loss on each thread
for (int64_t i = (blockIdx.x * blockDim.x + threadIdx.x) * ILP;
i < num_examples * num_classes; i += gridDim.x * blockDim.x * ILP) {
int64_t idy = i / num_classes;
labelscalar_t y = cls_targets_at_level[idy];
int64_t base_yid = i % num_classes;
int64_t pos_idx = idy * num_classes + y;
p_vec = *(uint4 *)&cls_output[i];
// Skip ignored matches
if (y == -2) {
#pragma unroll
for (int j = 0; j < ILP; j++) {
*((scalar_t *)(&grad_vec) + j) = 0;
}
*(uint4 *)&partial_grad[i] = grad_vec;
continue;
}
#pragma unroll
for (int j = 0; j < ILP; j++) {
// Skip the pad classes
if (base_yid + j >= num_real_classes) {
*((scalar_t *)(&grad_vec) + j) = 0;
continue;
}
accscalar_t p = static_cast<accscalar_t>(*((scalar_t *)(&p_vec) + j));
accscalar_t exp_np = ::exp(-p);
accscalar_t exp_pp = ::exp(p);
accscalar_t sigma = one / (one + exp_np);
accscalar_t logee = (p >= 0) ? exp_np : exp_pp;
accscalar_t addee = (p >= 0) ? 0 : -p;
accscalar_t off_a = addee + ::log(one + logee);
// Negative matches
accscalar_t base = SMOOTHING ? nn_norm * p : p;
accscalar_t off_b = (SMOOTHING ? np_norm : 0) - sigma;
accscalar_t coeff_f1 = one - alpha;
accscalar_t coeff_f2 = sigma;
accscalar_t coeff_b1 = gamma;
accscalar_t coeff_b2 = one - sigma;
// Positive matches
if (y >= 0 && (i + j == pos_idx)) {
base = SMOOTHING ? pn_norm * p : 0;
off_b = (SMOOTHING ? pp_norm : one) - sigma;
coeff_f1 = alpha;
coeff_f2 = one - sigma;
coeff_b1 = -gamma;
coeff_b2 = sigma;
}
accscalar_t coeff_f = coeff_f1 * ::pow(coeff_f2, gamma);
accscalar_t coeff_b = coeff_b1 * coeff_b2;
accscalar_t loss_t = coeff_f * (base + off_a);
accscalar_t grad = coeff_f * (coeff_b * (base + off_a) - off_b);
// Delay normalizing the partial gradient by num_positives_sum until back
// propagation because scalar_t reduces precision and focal loss is very
// sensitive to small gradients. No overflow concern here since the
// gradient has a smaller range than the input.
loss_acc += loss_t;
*((scalar_t *)(&grad_vec) + j) = static_cast<scalar_t>(grad);
}
// This is not guaranteed to generate stg.128 and may result in two stg.64 stores.
*(uint4 *)&partial_grad[i] = grad_vec;
}
loss_shm[threadIdx.x] = loss_acc;
// Intra-CTA reduction
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
loss_shm[threadIdx.x] += loss_shm[threadIdx.x + s];
}
__syncthreads();
}
// Inter-CTA reduction
if (threadIdx.x == 0) {
loss_acc = loss_shm[0] * normalizer;
atomicAdd(loss, loss_acc);
}
}
template <int ILP, typename scalar_t, typename accscalar_t,
typename outscalar_t>
__global__ void focal_loss_backward_cuda_kernel(
scalar_t *partial_grad, const outscalar_t *__restrict__ grad_output,
const float *__restrict__ num_positives_sum, const uint64_t numel) {
int64_t idx = (blockIdx.x * blockDim.x + threadIdx.x) * ILP;
accscalar_t normalizer = static_cast<accscalar_t>(grad_output[0]) /
static_cast<accscalar_t>(num_positives_sum[0]);
// The input is required to be padded for vector loads, so there is no need to
// check whether the last element of the ILP group goes out of bounds.
if (idx >= numel)
return;
uint4 grad_vec;
grad_vec = *(uint4 *)&partial_grad[idx];
#pragma unroll(ILP)
for (int i = 0; i < ILP; i++) {
auto grad = static_cast<accscalar_t>(*((scalar_t *)(&grad_vec) + i));
grad *= normalizer;
*((scalar_t *)(&grad_vec) + i) = static_cast<scalar_t>(grad);
}
*(uint4 *)&partial_grad[idx] = grad_vec;
}
std::vector<at::Tensor> focal_loss_forward_cuda(
const at::Tensor &cls_output, const at::Tensor &cls_targets_at_level,
const at::Tensor &num_positives_sum, const int64_t num_real_classes,
const float alpha, const float gamma, const float smoothing_factor) {
// Checks required for correctness
AT_ASSERTM(cls_output.size(-1) >= num_real_classes,
"Incorrect number of real classes.");
AT_ASSERTM(cls_targets_at_level.scalar_type() == at::kLong,
"Invalid label type.");
AT_ASSERTM(
(num_positives_sum.numel() == 1) &&
(num_positives_sum.scalar_type() == at::kFloat),
"Expect num_positives_sum to be a float32 tensor with only one element.");
AT_ASSERTM(cls_output.dim() == cls_targets_at_level.dim() + 1,
"Mis-matched dimensions between class output and label.");
for (int64_t i = 0; i < cls_targets_at_level.dim(); i++)
AT_ASSERTM(cls_output.size(i) == cls_targets_at_level.size(i),
"Mis-matched shape between class output and label.");
// Checks required for better performance
const int ILP = sizeof(uint4) / cls_output.element_size();
ASSERT_UINT4_ALIGNED(cls_output.data_ptr());
AT_ASSERTM(cls_output.size(-1) % ILP == 0,
"Pad number of classes first to take advantage of 128 bit load.");
AT_ASSERTM(num_real_classes >= ILP, "Too few classes.");
int64_t num_classes = cls_output.size(-1);
int64_t num_examples = cls_output.numel() / num_classes;
at::Tensor loss = at::zeros({}, cls_output.options().dtype(at::kFloat));
// Compute the incomplete gradient during fprop since most of the heavy
// computation of bprop is the same as fprop; trading memory for compute
// helps with focal loss.
at::Tensor partial_grad = at::empty_like(cls_output);
// The grid contains 2 CTAs per SM; each CTA loops over the input with a grid
// stride until the last item.
if (multiProcessorCount == 0) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, at::cuda::current_device());
multiProcessorCount = props.multiProcessorCount;
}
dim3 block(512);
dim3 grid(2 * multiProcessorCount);
// Specialize on label smoothing or not to reduce redundant operations
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (smoothing_factor == 0.0f) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
cls_output.scalar_type(), "focal_loss_fprop", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
using labelscalar_t = int64_t;
using outscalar_t = float;
const int ILP = sizeof(uint4) / sizeof(scalar_t);
focal_loss_forward_cuda_kernel<false, ILP, scalar_t, labelscalar_t,
accscalar_t, outscalar_t>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
loss.data_ptr<outscalar_t>(),
partial_grad.data_ptr<scalar_t>(),
cls_output.data_ptr<scalar_t>(),
cls_targets_at_level.data_ptr<labelscalar_t>(),
num_positives_sum.data_ptr<float>(), num_examples,
num_classes, num_real_classes, alpha, gamma,
smoothing_factor);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
cls_output.scalar_type(), "focal_loss_fprop", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
using labelscalar_t = int64_t;
using outscalar_t = float;
const int ILP = sizeof(uint4) / sizeof(scalar_t);
focal_loss_forward_cuda_kernel<true, ILP, scalar_t, labelscalar_t,
accscalar_t, outscalar_t>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
loss.data_ptr<outscalar_t>(),
partial_grad.data_ptr<scalar_t>(),
cls_output.data_ptr<scalar_t>(),
cls_targets_at_level.data_ptr<labelscalar_t>(),
num_positives_sum.data_ptr<float>(), num_examples,
num_classes, num_real_classes, alpha, gamma,
smoothing_factor);
});
}
THCudaCheck(cudaGetLastError());
return {loss, partial_grad};
}
at::Tensor focal_loss_backward_cuda(const at::Tensor &grad_output,
const at::Tensor &partial_grad,
const at::Tensor &num_positives_sum) {
// Each thread processes ILP elements
const int ILP = sizeof(uint4) / partial_grad.element_size();
dim3 block(512);
dim3 grid((partial_grad.numel() + block.x * ILP - 1) / (block.x * ILP));
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
partial_grad.scalar_type(), "focal_loss_bprop", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
using outscalar_t = float;
const int ILP = sizeof(uint4) / sizeof(scalar_t);
focal_loss_backward_cuda_kernel<ILP, scalar_t, accscalar_t, outscalar_t>
<<<grid, block, 0, stream>>>(partial_grad.data_ptr<scalar_t>(),
grad_output.data_ptr<outscalar_t>(),
num_positives_sum.data_ptr<float>(),
partial_grad.numel());
});
THCudaCheck(cudaGetLastError());
return partial_grad;
}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/structures | structures | bounding_box | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class BoxList(object):
"""
This class represents a set of bounding boxes.
The bounding boxes are represented as a Nx4 Tensor.
In order to uniquely determine the bounding boxes with respect
to an image, we also store the corresponding image dimensions.
They can contain extra information that is specific to each bounding box, such as
labels.
"""
def __init__(self, bbox, image_size, mode="xyxy"):
device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
if bbox.ndimension() != 2:
raise ValueError(
"bbox should have 2 dimensions, got {}".format(bbox.ndimension())
)
if bbox.size(-1) != 4:
raise ValueError(
"last dimension of bbox should have a "
"size of 4, got {}".format(bbox.size(-1))
)
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
self.bbox = bbox
self.size = image_size # (image_width, image_height)
self.mode = mode
self.extra_fields = {}
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def has_field(self, field):
return field in self.extra_fields
def fields(self):
return list(self.extra_fields.keys())
def _copy_extra_fields(self, bbox):
for k, v in bbox.extra_fields.items():
self.extra_fields[k] = v
def convert(self, mode):
if mode not in ("xyxy", "xywh"):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
# we only have two modes, so don't need to check
# self.mode
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if mode == "xyxy":
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat(
(xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
def _split_into_xyxy(self):
if self.mode == "xyxy":
xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
return xmin, ymin, xmax, ymax
elif self.mode == "xywh":
TO_REMOVE = 1
xmin, ymin, w, h = self.bbox.split(1, dim=-1)
return (
xmin,
ymin,
xmin + (w - TO_REMOVE).clamp(min=0),
ymin + (h - TO_REMOVE).clamp(min=0),
)
else:
raise RuntimeError("Should not be here")
def resize(self, size, *args, **kwargs):
"""
Returns a resized copy of this bounding box
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_box = self.bbox * ratio
bbox = BoxList(scaled_box, size, mode=self.mode)
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox
ratio_width, ratio_height = ratios
xmin, ymin, xmax, ymax = self._split_into_xyxy()
scaled_xmin = xmin * ratio_width
scaled_xmax = xmax * ratio_width
scaled_ymin = ymin * ratio_height
scaled_ymax = ymax * ratio_height
scaled_box = torch.cat(
(scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
)
bbox = BoxList(scaled_box, size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.resize(size, *args, **kwargs)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
image_width, image_height = self.size
xmin, ymin, xmax, ymax = self._split_into_xyxy()
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat(
(transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
)
bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
def crop(self, box):
"""
Crops a rectangular region from this bounding box. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
"""
xmin, ymin, xmax, ymax = self._split_into_xyxy()
w, h = box[2] - box[0], box[3] - box[1]
cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
# TODO should I filter empty boxes here?
if False:
is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
cropped_box = torch.cat(
(cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
)
bbox = BoxList(cropped_box, (w, h), mode="xyxy")
# bbox._copy_extra_fields(self)
for k, v in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.crop(box)
bbox.add_field(k, v)
return bbox.convert(self.mode)
# Tensor-like methods
def to(self, device, **kwargs):
bbox = BoxList(self.bbox.to(device, non_blocking=True), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
if torch.is_tensor(v):
v_tmp = torch.empty_like(v, device=device)
v_tmp.copy_(v, **kwargs)
v = v_tmp
else:
v = v.to(device, **kwargs)
bbox.add_field(k, v)
return bbox
def pin_memory(self):
bbox = BoxList(self.bbox.pin_memory(), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "pin_memory"):
v = v.pin_memory()
bbox.add_field(k, v)
return bbox
def __getitem__(self, item):
bbox = BoxList(self.bbox[item], self.size, self.mode)
for k, v in self.extra_fields.items():
bbox.add_field(k, v[item])
return bbox
def __len__(self):
return self.bbox.shape[0]
def clip_to_image(self, remove_empty=True):
TO_REMOVE = 1
self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
if remove_empty:
box = self.bbox
keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
return self[keep]
return self
def area(self):
box = self.bbox
if self.mode == "xyxy":
TO_REMOVE = 1
area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
elif self.mode == "xywh":
area = box[:, 2] * box[:, 3]
else:
raise RuntimeError("Should not be here")
return area
def copy_with_fields(self, fields):
bbox = BoxList(self.bbox, self.size, self.mode)
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
bbox.add_field(field, self.get_field(field))
return bbox
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_boxes={}, ".format(len(self))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
if __name__ == "__main__":
bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
s_bbox = bbox.resize((5, 5))
print(s_bbox)
print(s_bbox.bbox)
t_bbox = bbox.transpose(0)
print(t_bbox)
print(t_bbox.bbox)
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
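# Illustrative usage (added sketch, not part of the original module; the field
# names below are made up for the example):
#
#   results = [{"batch_size": 8, "throughput": 120.0},
#              {"batch_size": 1, "throughput": 35.5}]
#   results = sort_results(results)
#   show_results(format_data(results))
#   save_results("report.csv", results, formatted=True)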
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | __main__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import ExperimentFinalizer
from .maintainer import DockerMaintainer
from .preparer import ExperimentPreparer
from .runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="List of devices on which the experiment will be executed."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices) |
PyTorch/Classification/ConvNets/image_classification | image_classification | logger | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
from numbers import Number
import dllogger
import numpy as np
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
if isinstance(step[0], Number):
s += "Epoch: {} ".format(step[0])
else:
s += "{} ".format(step[0])
if len(step) > 1:
s += "Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
if len(step) == 0:
s = "Summary:"
return s
PERF_METER = lambda: Meter(AverageMeter(), AverageMeter(), AverageMeter())
LOSS_METER = lambda: Meter(AverageMeter(), AverageMeter(), MinMeter())
ACC_METER = lambda: Meter(AverageMeter(), AverageMeter(), MaxMeter())
LR_METER = lambda: Meter(LastMeter(), LastMeter(), LastMeter())
LAT_100 = lambda: Meter(QuantileMeter(1), QuantileMeter(1), QuantileMeter(1))
LAT_99 = lambda: Meter(QuantileMeter(0.99), QuantileMeter(0.99), QuantileMeter(0.99))
LAT_95 = lambda: Meter(QuantileMeter(0.95), QuantileMeter(0.95), QuantileMeter(0.95))
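# Note (added): a Meter chains three aggregators. record() feeds the iteration
# aggregator; reset_iteration() folds the accumulated iteration value into the
# epoch aggregator, and reset_epoch() folds the epoch value into the run
# aggregator, so get_iteration()/get_epoch()/get_run() report statistics at
# each of the three levels.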
class Meter(object):
def __init__(self, iteration_aggregator, epoch_aggregator, run_aggregator):
self.run_aggregator = run_aggregator
self.epoch_aggregator = epoch_aggregator
self.iteration_aggregator = iteration_aggregator
def record(self, val, n=1):
self.iteration_aggregator.record(val, n=n)
def get_iteration(self):
v, n = self.iteration_aggregator.get_val()
return v
def reset_iteration(self):
v, n = self.iteration_aggregator.get_data()
self.iteration_aggregator.reset()
if v is not None:
self.epoch_aggregator.record(v, n=n)
def get_epoch(self):
v, n = self.epoch_aggregator.get_val()
return v
def reset_epoch(self):
v, n = self.epoch_aggregator.get_data()
self.epoch_aggregator.reset()
if v is not None:
self.run_aggregator.record(v, n=n)
def get_run(self):
v, n = self.run_aggregator.get_val()
return v
def reset_run(self):
self.run_aggregator.reset()
class QuantileMeter(object):
def __init__(self, q):
self.q = q
self.reset()
def reset(self):
self.vals = []
self.n = 0
def record(self, val, n=1):
if isinstance(val, list):
self.vals += val
self.n += len(val)
else:
self.vals += [val] * n
self.n += n
def get_val(self):
if not self.vals:
return None, self.n
return np.quantile(self.vals, self.q, interpolation="nearest"), self.n
def get_data(self):
return self.vals, self.n
class MaxMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.max = None
self.n = 0
def record(self, val, n=1):
if self.max is None:
self.max = val
else:
self.max = max(self.max, val)
self.n = n
def get_val(self):
return self.max, self.n
def get_data(self):
return self.max, self.n
class MinMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.min = None
self.n = 0
def record(self, val, n=1):
if self.min is None:
self.min = val
else:
self.min = min(self.min, val)
self.n = n
def get_val(self):
return self.min, self.n
def get_data(self):
return self.min, self.n
class LastMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.last = None
self.n = 0
def record(self, val, n=1):
self.last = val
self.n = n
def get_val(self):
return self.last, self.n
def get_data(self):
return self.last, self.n
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.n = 0
self.val = 0
def record(self, val, n=1):
self.n += n
self.val += val * n
def get_val(self):
if self.n == 0:
return None, 0
return self.val / self.n, self.n
def get_data(self):
if self.n == 0:
return None, 0
return self.val / self.n, self.n
class Logger(object):
def __init__(self, print_interval, backends, start_epoch=-1, verbose=False):
self.epoch = start_epoch
self.iteration = -1
self.val_iteration = -1
self.calib_iteration = -1
self.metrics = OrderedDict()
self.backends = backends
self.print_interval = print_interval
self.verbose = verbose
dllogger.init(backends)
def log_parameter(self, data, verbosity=0):
dllogger.log(step="PARAMETER", data=data, verbosity=verbosity)
def register_metric(self, metric_name, meter, verbosity=0, metadata={}):
if self.verbose:
print("Registering metric: {}".format(metric_name))
self.metrics[metric_name] = {"meter": meter, "level": verbosity}
dllogger.metadata(metric_name, metadata)
def log_metric(self, metric_name, val, n=1):
self.metrics[metric_name]["meter"].record(val, n=n)
def start_iteration(self, mode="train"):
if mode == "val":
self.val_iteration += 1
elif mode == "train":
self.iteration += 1
elif mode == "calib":
self.calib_iteration += 1
def end_iteration(self, mode="train"):
if mode == "val":
it = self.val_iteration
elif mode == "train":
it = self.iteration
elif mode == "calib":
it = self.calib_iteration
if it % self.print_interval == 0 or mode == "calib":
metrics = {n: m for n, m in self.metrics.items() if n.startswith(mode)}
if mode == "train":
step = (self.epoch, self.iteration)
elif mode == "val":
step = (self.epoch, self.iteration, self.val_iteration)
elif mode == "calib":
step = ("Calibration", self.calib_iteration)
verbositys = {m["level"] for _, m in metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in metrics.items() if m["level"] == ll}
dllogger.log(
step=step,
data={n: m["meter"].get_iteration() for n, m in llm.items()},
verbosity=ll,
)
for n, m in metrics.items():
m["meter"].reset_iteration()
dllogger.flush()
def start_epoch(self):
self.epoch += 1
self.iteration = 0
self.val_iteration = 0
for n, m in self.metrics.items():
if not n.startswith("calib"):
m["meter"].reset_epoch()
def end_epoch(self):
for n, m in self.metrics.items():
if not n.startswith("calib"):
m["meter"].reset_iteration()
verbositys = {m["level"] for _, m in self.metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in self.metrics.items() if m["level"] == ll}
dllogger.log(
step=(self.epoch,),
data={n: m["meter"].get_epoch() for n, m in llm.items()},
)
def start_calibration(self):
self.calib_iteration = 0
for n, m in self.metrics.items():
if n.startswith("calib"):
m["meter"].reset_epoch()
def end_calibration(self):
for n, m in self.metrics.items():
if n.startswith("calib"):
m["meter"].reset_iteration()
def end(self):
for n, m in self.metrics.items():
m["meter"].reset_epoch()
verbositys = {m["level"] for _, m in self.metrics.items()}
for ll in verbositys:
llm = {n: m for n, m in self.metrics.items() if m["level"] == ll}
dllogger.log(
step=tuple(), data={n: m["meter"].get_run() for n, m in llm.items()}
)
for n, m in self.metrics.items():
m["meter"].reset_epoch()
dllogger.flush()
def iteration_generator_wrapper(self, gen, mode="train"):
for g in gen:
self.start_iteration(mode=mode)
yield g
self.end_iteration(mode=mode)
def epoch_generator_wrapper(self, gen):
for g in gen:
self.start_epoch()
yield g
self.end_epoch()
class Metrics:
ACC_METADATA = {"unit": "%", "format": ":.2f"}
IPS_METADATA = {"unit": "images/s", "format": ":.2f"}
TIME_METADATA = {"unit": "s", "format": ":.5f"}
LOSS_METADATA = {"unit": None, "format": ":.5f"}
LR_METADATA = {"unit": None, "format": ":.5f"}
def __init__(self, logger):
self.logger = logger
self.map = {}
def log(self, **kwargs):
if self.logger is None:
return
for k, v in kwargs.items():
tks = self.map.get(k, [k])
for tk in tks:
if isinstance(v, tuple):
self.logger.log_metric(tk, v[0], v[1])
else:
self.logger.log_metric(tk, v)
class TrainingMetrics(Metrics):
def __init__(self, logger):
super().__init__(logger)
if self.logger is not None:
self.map = {
"loss": ["train.loss"],
"compute_ips": ["train.compute_ips"],
"total_ips": ["train.total_ips"],
"data_time": ["train.data_time"],
"compute_time": ["train.compute_time"],
"lr": ["train.lr"],
"grad_scale": ["train.grad_scale"],
}
logger.register_metric(
"train.loss",
LOSS_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
logger.register_metric(
"train.compute_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
"train.total_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
"train.data_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
"train.compute_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
"train.lr",
LR_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
)
logger.register_metric(
"train.grad_scale",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
class ValidationMetrics(Metrics):
def __init__(self, logger, prefix, topk):
super().__init__(logger)
if self.logger is not None:
self.map = {
"loss": [f"{prefix}.loss"],
"top1": [f"{prefix}.top1"],
f"top{topk}": [f"{prefix}.top{topk}"],
"compute_ips": [f"{prefix}.compute_ips"],
"total_ips": [f"{prefix}.total_ips"],
"data_time": [f"{prefix}.data_time"],
"compute_time": [
f"{prefix}.compute_latency",
f"{prefix}.compute_latency_at100",
f"{prefix}.compute_latency_at99",
f"{prefix}.compute_latency_at95",
],
}
logger.register_metric(
f"{prefix}.top1",
ACC_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.ACC_METADATA,
)
logger.register_metric(
f"{prefix}.top{topk}",
ACC_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.ACC_METADATA,
)
logger.register_metric(
f"{prefix}.loss",
LOSS_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.LOSS_METADATA,
)
logger.register_metric(
f"{prefix}.compute_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
f"{prefix}.total_ips",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.IPS_METADATA,
)
logger.register_metric(
f"{prefix}.data_time",
PERF_METER(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency",
PERF_METER(),
verbosity=dllogger.Verbosity.DEFAULT,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at100",
LAT_100(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at99",
LAT_99(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
logger.register_metric(
f"{prefix}.compute_latency_at95",
LAT_95(),
verbosity=dllogger.Verbosity.VERBOSE,
metadata=Metrics.TIME_METADATA,
)
|
TensorFlow2/Detection/Efficientdet/scripts/D0 | D0 | convergence-FP32-8xV100-32G | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=40
ep=300
lr=0.4
wu=5
ema=0.9999
momentum=0.9
mkdir -p /tmp/convergence-FP32-8xV100-32G
curr_dt=`date +"%Y-%m-%d-%H-%M-%S"`
mpirun -np 8 --allow-run-as-root --bind-to none \
-map-by slot -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-x CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
python3 train.py \
--training_mode=${training_mode:=traineval} \
--training_file_pattern=/workspace/coco/train-* \
--val_file_pattern=/workspace/coco/val-* \
--val_json_file=/workspace/coco/annotations/instances_val2017.json \
--model_name=efficientdet-d0 \
--model_dir=/tmp/convergence-FP32-8xV100-32G \
--backbone_init=/workspace/checkpoints/efficientnet-b0-joc \
--batch_size=$bs \
--eval_batch_size=$bs \
--num_epochs=$ep \
--use_xla=True \
--amp=False \
--lr=$lr \
--warmup_epochs=$wu \
--hparams="moving_average_decay=$ema,momentum=$momentum" \
2>&1 | tee /tmp/convergence-FP32-8xV100-32G/train-$curr_dt.log |
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | ssd_inception_v2_coco | # SSD with Inception v2 configuration for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
ssd {
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
}
}
similarity_calculator {
iou_similarity {
}
}
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.2
max_scale: 0.95
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
reduce_boxes_in_lowest_layer: true
}
}
image_resizer {
fixed_shape_resizer {
height: 300
width: 300
}
}
box_predictor {
convolutional_box_predictor {
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.8
kernel_size: 3
box_code_size: 4
apply_sigmoid_to_scores: false
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
}
}
}
feature_extractor {
type: 'ssd_inception_v2'
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.00004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
train: true,
scale: true,
center: true,
decay: 0.9997,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid {
}
}
localization_loss {
weighted_smooth_l1 {
}
}
hard_example_miner {
num_hard_examples: 3000
iou_threshold: 0.99
loss_type: CLASSIFICATION
max_negatives_per_positive: 3
min_negatives_per_image: 0
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
batch_size: 24
optimizer {
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
}
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
# Note: The below line limits the training process to 200K steps, which we
# empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
}
eval_config: {
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
TensorFlow2/Segmentation/MaskRCNN/scripts | scripts | inference | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies inference. """
import argparse
import os
import shutil
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 inference'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--batch_size', type=int, metavar='N', default=8,
help='Batch size used during inference')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' infer'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --eval_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# run model
exit(subprocess.call(cmd, shell=True))
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner | graph_aligner | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
from syngen.graph_aligner.base_graph_aligner import BaseGraphAligner
from syngen.graph_aligner.xgboost_aligner import XGBoostAligner
aligner_classes = {
'xgboost': XGBoostAligner,
}
|
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
class Accelerator(Parameter):
AMP = "amp"
CUDA = "cuda"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
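    """Dynamically import the module at `file_path` under the name `label` and return its `target` attribute (or None if missing)."""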
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/doc | doc | DCNv2 | # DCNv2 for TensorFlow 2
## Table Of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Quick Start Guide](#quick-start-guide)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Training process](#training-process)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (8x A100 80GB)](#inference-performance-nvidia-dgx-a100-8x-a100-80gb)
## Model overview
The Deep Cross Network version 2 models (DCNv2) were first proposed in
[DCN V2: Improved Deep & Cross Network and Practical Lessons for Web-scale Learning to Rank Systems](https://arxiv.org/abs/2008.13535)
as an improvement upon [ Deep & Cross Network for Ad Click Predictions.](https://arxiv.org/abs/1708.05123).
It is a learning-to-rank algorithm designed to efficiently learn feature interactions. In this repository, we implement
an example of a DCNv2 model by replacing DLRM's dot interaction layer with a low-rank Deep Cross Network v2 interaction.
For DCNv2, we also chose to use the Adam optimization algorithm to better reflect common industry practices.
This significantly improves results on the Criteo 1TB dataset but also increases memory usage.
Similarly to our DLRM implementation, we use a technique
called frequency thresholding to demonstrate models of different sizes.
"Total embedding size" means the amount of memory necessary for a single forward pass, while the "GPU Memory required for training"
also includes the memory needed to store the full optimizer state.
The table below summarizes the model sizes and frequency thresholds used in this repository, for both the synthetic and real datasets supported.
| Dataset | Frequency Threshold | Final dataset size | Intermediate preprocessing storage required | Suitable for accuracy tests | Total download & preprocess time | GPU Memory required for training | Total embedding size | Number of model parameters |
|:-------|:-------|:-------|:-------------|:-------------------|:-------------------|:-------------------|:-------------------|:-------------------|
| Synthetic T15 |15 | 6 GiB | None | No | ~Minutes | 48 GiB | 15.6 GiB | 4.2B |
| Synthetic T3 |3 | 6 GiB | None | No | ~Minutes | 250 GiB | 84.9 GiB | 22.8B |
| Synthetic T0 |0 | 6 GiB | None | No | ~Minutes | 1.27 TiB | 421 GiB | 113B |
| Real Criteo T15 |15 | 370 GiB | ~Terabytes | Yes | ~Hours | 48 GiB | 15.6 GiB | 4.2B |
| Real Criteo T3 |3 | 370 GiB | ~Terabytes | Yes | ~Hours | 250 GiB | 84.9 GiB | 22.8B |
| Real Criteo T0 |0 | 370 GiB | ~Terabytes | Yes | ~Hours | 1.27 TiB | 421 GiB | 113B |
You can find a detailed description of the Criteo dataset preprocessing in the [preprocessing documentation](./criteo_dataset.md#advanced).
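As a rough illustration of the frequency-thresholding idea, the toy snippet below maps every categorical value seen fewer than `threshold` times to a single shared "rare" index, which is what shrinks the embedding tables. This is only a sketch with made-up names; the actual logic lives in the preprocessing scripts linked above.
```python
from collections import Counter

def frequency_threshold(column_values, threshold):
    """Keep a dedicated embedding row only for values seen >= threshold times."""
    counts = Counter(column_values)
    frequent = sorted(v for v, c in counts.items() if c >= threshold)
    vocab = {v: i + 1 for i, v in enumerate(frequent)}  # index 0 is the shared "rare" bucket
    ids = [vocab.get(v, 0) for v in column_values]
    return ids, len(vocab) + 1  # remapped ids and resulting embedding table size

# threshold=2 keeps 'a' and 'b'; 'c' and 'd' collapse into bucket 0
ids, table_size = frequency_threshold(['a', 'a', 'b', 'b', 'c', 'd'], threshold=2)
```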
### Model architecture
DCNv2 accepts two types of features: categorical and numerical. For each categorical feature,
an embedding table is used to provide a dense representation of each unique value.
The dense features enter the model and are transformed by a simple neural network referred to as "Bottom MLP".
This part of the network consists of a series
of linear layers with ReLU activations. The output of the bottom MLP and the embedding vectors are then fed into the
Deep Cross Network v2 interaction layer.
The output of this layer is then concatenated
with the features resulting from the bottom MLP and fed
into the "top MLP," which is a series of dense layers with activations.
The model outputs a single number which can be interpreted as a likelihood of a certain user clicking an ad.
<p align="center">
<img width="100%" src="./img/dcnv2_singlegpu_architecture.svg" />
<br>
Figure 1. The architecture of our DCNv2 model.
</p>
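For intuition, the interaction can be sketched as a stack of low-rank cross layers, each computing `x_{l+1} = x_0 * (U(V^T x_l) + b) + x_l`. The TensorFlow 2 snippet below is an illustrative sketch only, not the implementation used in this repository; the class and argument names are invented.
```python
import tensorflow as tf

class LowRankCrossLayer(tf.keras.layers.Layer):
    """One DCNv2 cross layer with the weight matrix factored as U @ V^T."""

    def __init__(self, input_dim, projection_dim):
        super().__init__()
        self.v = tf.keras.layers.Dense(projection_dim, use_bias=False)  # V^T x_l
        self.u = tf.keras.layers.Dense(input_dim, use_bias=True)        # U(.) + b

    def call(self, x0, xl):
        # x_{l+1} = x_0 * (U(V^T x_l) + b) + x_l
        return x0 * self.u(self.v(xl)) + xl
```
Stacking a few such layers over the concatenation of the embedding vectors and the bottom-MLP output gives the interaction output that, as described above, is concatenated with the bottom-MLP features and passed to the top MLP.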
### Hardware requirements
| Dataset | Disk space required | Total GPU memory required for training | Total embedding size | Suitable for accuracy tests | Total download & preprocess time |
|:-------|:-------------|:-------------------|:-------------------|:-------------------|:-------------------|
| Synthetic Criteo T15 | 6 GiB | 48 GiB | 16 GiB | No | ~Minutes |
| Synthetic Criteo T3 | 6 GiB | 250 GiB | 82 GiB | No | ~Minutes |
| Synthetic Criteo T0 | 6 GiB | 1.27 TiB | 421 GiB | No | ~Minutes |
| Real Criteo T15 | 370 GiB | 48 GiB | 16 GiB | Yes | ~Hours |
| Real Criteo T3 | 370 GiB | 250 GiB | 82 GiB | Yes | ~Hours |
| Real Criteo T0 | 370 GiB | 1.27 TiB | 421 GiB | Yes | ~Hours |
## Quick Start Guide
To train DCNv2 perform the following steps.
For the specifics concerning training and inference,
refer to the [Advanced](../README.md#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow2/Recommendation/DLRM_and_DCNv2
```
2. Build and run a DCNv2 Docker container.
```bash
docker build -t train_docker_image .
docker run --cap-add SYS_NICE --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data train_docker_image bash
```
3. Generate a synthetic dataset.
Downloading and preprocessing the Criteo 1TB dataset requires a lot of time and disk space.
Because of this we provide a synthetic dataset generator that roughly matches Criteo 1TB characteristics.
This will enable you to benchmark quickly.
If you prefer to benchmark on the real data, please follow [these instructions](./criteo_dataset.md#quick-start-guide)
to download and preprocess the dataset.
```bash
python -m dataloading.generate_feature_spec --variant criteo_t15_synthetic --dst feature_spec.yaml
python -m dataloading.transcribe --src_dataset_type synthetic --src_dataset_path . \
--dst_dataset_path /data/preprocessed --max_batches_train 1000 --max_batches_test 100 --dst_dataset_type tf_raw
```
4. Verify the input data:
After running `tree /data/preprocessed` you should see the following directory structure:
```bash
$ tree /data/preprocessed
/data/preprocessed
├── feature_spec.yaml
├── test
│ ├── cat_0.bin
│ ├── cat_1.bin
│ ├── ...
│ ├── label.bin
│ └── numerical.bin
└── train
├── cat_0.bin
├── cat_1.bin
├── ...
├── label.bin
└── numerical.bin
2 directories, 57 files
```
5. Start training.
- single-GPU:
```bash
horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dcnv2.py --dataset_path /data/preprocessed --amp --xla --save_checkpoint_path /data/checkpoint/
```
- multi-GPU:
```bash
horovodrun -np 8 -H localhost:8 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dcnv2.py --dataset_path /data/preprocessed --amp --xla --save_checkpoint_path /data/checkpoint/
```
6. Start evaluation.
To evaluate a previously trained checkpoint, append `--restore_checkpoint_path <path> --mode eval` to the command used for training. For example, to test a checkpoint trained on 8xA100 80GB, run:
```bash
horovodrun -np 8 -H localhost:8 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dcnv2.py --dataset_path /data/preprocessed --amp --xla --restore_checkpoint_path /data/checkpoint/ --mode eval
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, follow the instructions
in the [Quick Start Guide](#quick-start-guide). You can also add the `--max_steps 1000` option
if you want to get a reliable throughput measurement without running the entire training.
You can also use synthetic data by running with the `--dataset_type synthetic` option if you haven't downloaded the dataset yet.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size, run:
```
horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u dcnv2.py --dataset_path /data/preprocessed --amp --restore_checkpoint_path <checkpoint_path> --mode inference
```
### Training process
The main training script resides in `dcnv2.py`. Training speed is measured by throughput, that is,
the number of samples processed per second.
We use mixed precision training with static loss scaling for the bottom and top MLPs
while embedding tables are stored in FP32 format.
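A minimal sketch of static loss scaling with the Keras mixed-precision API is shown below; this is not the repository's training loop, and the scale value is only illustrative.
```python
import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Wrap the dense-part optimizer with a fixed (static) loss scale; 1024 is illustrative.
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
    optimizer, dynamic=False, initial_scale=1024.0)

# Inside the training step:
#   scaled_loss = optimizer.get_scaled_loss(loss)
#   scaled_grads = tape.gradient(scaled_loss, trainable_variables)
#   grads = optimizer.get_unscaled_gradients(scaled_grads)
#   optimizer.apply_gradients(zip(grads, trainable_variables))
```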
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
We used three model size variants to show memory scalability in a multi-GPU setup
(4.2B params, 22.8B params, and 113B params). Refer to the [Model overview](#model-overview) section for detailed
information about the model variants.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running training scripts as described in the Quick Start Guide in the DCNv2 Docker container.
| GPUs | Model size | Batch size / GPU | Accuracy (AUC) - TF32 | Accuracy (AUC) - mixed precision | Time to train - TF32 [minutes] | Time to train - mixed precision [minutes] | Time to train speedup (TF32 to mixed precision) |
|:-------|:-------------|:-------------------|:------------------------|:-----------------------------------|:---------------------------------|:--------------------------------------------|:--------------------------------------------------|
| 1 | small | 64k | 0.8078 | 0.8077 | 102.7 | 51.7 | 1.99 |
| 8 | large | 8k | 0.8075 | 0.8074 | 19.5 | 13.3 | 1.33 |
##### Training stability test
The histograms below show the distribution of ROC AUC results achieved at the end of the training.
<p align="center">
<img width="100%" src="./img/dcnv2_stability_test.svg" />
<br>
Figure 4. Results of stability tests for DCNv2.
</p>
#### Training performance results
We used throughput in items processed per second as the performance metric.
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by following the commands from the Quick Start Guide
in the DCNv2 Docker container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in items per second) were averaged over 1000 training steps.
| GPUs | Model size | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 to mixed precision) |
|:-------|:-------------|:-------------------|:--------------------|:-------------------------------|:-----------------------------------------------|
| 1 | small | 64k | 0.689M | 1.37M | 1.99 |
| 8 | large | 8k | 3.81M | 5.75M | 1.51 |
To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (8x A100 80GB)
| GPUs | Model size | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Average latency - TF32 [ms] | Average latency - mixed precision [ms] | Throughput speedup (mixed precision to TF32) |
|-------:|:-------------|-------------------:|:--------------------|:-------------------------------|------------------------------:|-----------------------------------------:|-----------------------------------------------:|
| 1 | small | 2048 | 1.30M | 1.31M | 1.57 | 1.56 | 1.01 |
|
PyTorch/Classification/ConvNets/efficientnet/inference/TF32 | TF32 | DGXA100_efficientnet-b4_TF32 |
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 256 --workspace ${1:-./} --raport-file raport_256.json
|
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | download_and_convert_imagenet | #!/bin/bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Script to download and preprocess ImageNet Challenge 2012
# training and validation data set.
#
# The final output of this script are sharded TFRecord files containing
# serialized Example protocol buffers. See build_imagenet_data.py for
# details of how the Example protocol buffers contain the ImageNet data.
#
# The final output of this script appears as such:
#
# data_dir/train-00000-of-01024
# data_dir/train-00001-of-01024
# ...
# data_dir/train-01023-of-01024
#
# and
#
# data_dir/validation-00000-of-00128
# data_dir/validation-00001-of-00128
# ...
# data_dir/validation-00127-of-00128
#
# Note that this script may take several hours to run to completion. The
# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending
# on the speed of your machine. Please be patient.
#
# **IMPORTANT**
# To download the raw images, the user must create an account with image-net.org
# and generate a username and access_key. The latter two are required for
# downloading the raw images.
#
# usage:
# cd research/slim
# bazel build :download_and_convert_imagenet
# ./bazel-bin/download_and_convert_imagenet.sh [data-dir]
set -e
if [ -z "$1" ]; then
echo "usage download_and_convert_imagenet.sh [data dir]"
exit
fi
# Create the output and temporary directories.
DATA_DIR="${1%/}"
SCRATCH_DIR="${DATA_DIR}/raw-data/"
mkdir -p "${DATA_DIR}"
mkdir -p "${SCRATCH_DIR}"
WORK_DIR="$0.runfiles/__main__"
# Download the ImageNet data.
LABELS_FILE="${WORK_DIR}/datasets/imagenet_lsvrc_2015_synsets.txt"
DOWNLOAD_SCRIPT="${WORK_DIR}/datasets/download_imagenet.sh"
"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}"
# Note the locations of the train and validation data.
TRAIN_DIRECTORY="${SCRATCH_DIR}train/"
VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/"
# Preprocess the validation data by moving the images into the appropriate
# sub-directory based on the label (synset) of the image.
echo "Organizing the validation data into sub-directories."
PREPROCESS_VAL_SCRIPT="${WORK_DIR}/datasets/preprocess_imagenet_validation_data.py"
VAL_LABELS_FILE="${WORK_DIR}/datasets/imagenet_2012_validation_synset_labels.txt"
"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}"
# Convert the XML files for bounding box annotations into a single CSV.
echo "Extracting bounding box information from XML."
BOUNDING_BOX_SCRIPT="${WORK_DIR}/datasets/process_bounding_boxes.py"
BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv"
BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/"
"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \
| sort >"${BOUNDING_BOX_FILE}"
echo "Finished downloading and preprocessing the ImageNet data."
# Build the TFRecords version of the ImageNet data.
BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data"
OUTPUT_DIRECTORY="${DATA_DIR}"
IMAGENET_METADATA_FILE="${WORK_DIR}/datasets/imagenet_metadata.txt"
"${BUILD_SCRIPT}" \
--train_directory="${TRAIN_DIRECTORY}" \
--validation_directory="${VALIDATION_DIRECTORY}" \
--output_directory="${OUTPUT_DIRECTORY}" \
--imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \
--labels_file="${LABELS_FILE}" \
--bounding_box_file="${BOUNDING_BOX_FILE}"
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/bin | bin | CMakeLists | ##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
function(add_binary bin_file)
get_filename_component(bin_name "${bin_file}" NAME_WE)
add_executable(${bin_name} ${bin_file})
target_link_libraries(${bin_name} tt2i)
target_include_directories(${bin_name} PRIVATE
../trt/
../trt/util
../trt/tacotron2
../trt/waveglow
../trt/denoiser
../trt/common
)
set_property(TARGET ${bin_name} PROPERTY RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
endfunction()
# build benchmark executable
file(GLOB binaries *.cpp)
foreach (file ${binaries})
add_binary(${file})
endforeach()
|
TensorFlow2/Detection/Efficientdet/scripts/D0 | D0 | evaluate-AMP-8xA100-80G | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=200
ema=0.999
mkdir -p /tmp/evaluate-AMP-8xA100-80G
mpirun -np 8 --allow-run-as-root --bind-to none \
-map-by slot -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-x CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
python3 eval.py \
--val_file_pattern=/workspace/coco/val-* \
--val_json_file=/workspace/coco/annotations/instances_val2017.json \
--ckpt_path=${CKPT:-/checkpoints/emackpt-300} \
--batch_size=$bs \
--amp=True \
--hparams="moving_average_decay=$ema" \
2>&1 | tee /tmp/evaluate-AMP-8xA100-80G/eval.log |
PyTorch/Classification/GPUNet/triton/runner | runner | utils | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER
def format_env_key(s: str):
"""
Format environmental variable key
Args:
s: String to format
Returns:
Upper cased string
"""
return s.upper()
def format_env_value(value: Any) -> str:
"""
Format environment variable value
Args:
value: value to be formatted
Returns:
Formatted value as a string
"""
value = value if not isinstance(value, Enum) else value.value
value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
value = str(value)
return value
def get_result_path(result_path: str) -> str:
"""
    Resolve the result path when it is passed in different variants, e.g. with an environment variable embedded in the path
Args:
result_path: Path to result file
Returns:
str
"""
for env_var, val in os.environ.items():
result_path = result_path.replace(f"${{{env_var}}}", val)
if result_path.startswith("/"):
return result_path
if result_path.startswith("./"):
result_path = result_path[2:]
return result_path
def clean_directory(directory: pathlib.Path) -> None:
"""
Remove all files and directories from directory
Args:
directory: Path to directory which should be cleaned
Returns:
None
"""
LOGGER.debug(f"Cleaning {directory.as_posix()}")
if not directory.is_dir():
LOGGER.warning(f"{directory.name} is not a directory.")
return
for item in os.listdir(directory):
item_path = directory / item
if item_path.is_dir():
LOGGER.debug(f"Remove dir {item_path.as_posix()}")
shutil.rmtree(item_path.as_posix())
elif item_path.is_file():
LOGGER.debug(f"Remove file: {item_path.as_posix()}")
item_path.unlink()
else:
LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")
def exec_command(command: Command) -> None:
"""
Execute command
Args:
command: Command to run
"""
try:
process = subprocess.Popen(
[str(command)],
shell=True,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.rstrip())
LOGGER.write(output)
result = process.poll()
if result != 0:
raise RunnerException(f"Command {command} failed with exit status: {result}")
except subprocess.CalledProcessError as e:
raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
def measurement_env_params(measurement):
params = {}
for key, value in measurement.__dict__.items():
param = f"{measurement.__class__.__name__.upper()}_{key.upper()}"
params[param] = " ".join(list(map(lambda val: str(val), value))) if isinstance(value, list) else int(value)
return params
def offline_performance_configuration(steps, max_batch_size):
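    # Offline profile: `steps` evenly spaced batch sizes up to max_batch_size,
    # measured with a single concurrent request stream.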
step = int(max_batch_size) // steps
batch_sizes = [step * idx for idx in range(1, steps + 1)]
concurrency = [1]
return batch_sizes, concurrency
def online_performance_configuration(steps, max_batch_size, number_of_model_instances):
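    # Online profile: cap the number of in-flight requests at
    # min(128, 2 * max_batch_size * number_of_model_instances), derive the per-request
    # batch size from that cap, and sweep concurrency up to it in `steps` increments.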
max_total_requests = 2 * int(max_batch_size) * int(number_of_model_instances)
max_concurrency = min(128, max_total_requests)
step = max(1, max_concurrency // steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // max_concurrency)]
concurrency = list(range(min_concurrency, max_concurrency + 1, step))
return batch_sizes, concurrency
|
TensorFlow/Classification/ConvNets/utils | utils | cmdline_helper | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
class ArgumentParserUtil(object):
def __init__(self, parser: argparse.ArgumentParser = None):
self.parser = parser
def build_data_parser_group(self):
data_group = self.parser.add_argument_group("Dataset arguments")
data_group.add_argument(
"--data_dir",
required=False,
default=None,
type=str,
help="Path to dataset in TFRecord format. Files should be named 'train-*' and 'validation-*'.")
data_group.add_argument("--data_idx_dir",
required=False,
default=None,
type=str,
help="Path to index files for DALI. Files should be named 'train-*' and 'validation-*'.")
data_group.add_argument("--dali",
action="store_true",
default=False,
required=False,
help="Enable DALI data input.")
data_group.add_argument("--synthetic_data_size",
required=False,
default=224,
type=int,
help="Dimension of image for synthetic dataset")
def build_training_parser_group(self):
train_group = self.parser.add_argument_group("Training arguments")
train_group.add_argument("--lr_init",
default=0.1,
type=float,
required=False,
help="Initial value for the learning rate.")
train_group.add_argument("--lr_warmup_epochs",
default=5,
type=int,
required=False,
help="Number of warmup epochs for learning rate schedule.")
train_group.add_argument("--weight_decay",
default=1e-4,
type=float,
required=False,
help="Weight Decay scale factor.")
train_group.add_argument("--weight_init",
default="fan_out",
choices=["fan_in", "fan_out"],
type=str,
required=False,
help="Model weight initialization method.")
train_group.add_argument("--momentum",
default=0.9,
type=float,
required=False,
help="SGD momentum value for the Momentum optimizer.")
train_group.add_argument("--label_smoothing",
type=float,
default=0.0,
required=False,
help="The value of label smoothing.")
train_group.add_argument("--mixup",
type=float,
default=0.0,
required=False,
help="The alpha parameter for mixup (if 0 then mixup is not applied).")
train_group.add_argument("--cosine_lr",
"--use_cosine",
"--use_cosine_lr"
"--cosine",
action="store_true",
default=False,
required=False,
help="Use cosine learning rate schedule.")
def build_generic_optimization_parser_group(self):
goptim_group = self.parser.add_argument_group("Generic optimization arguments")
goptim_group.add_argument("--xla",
"--use_xla",
action="store_true",
default=False,
required=False,
help="Enable XLA (Accelerated Linear Algebra) computation for improved performance.")
goptim_group.add_argument("--data_format",
choices=['NHWC', 'NCHW'],
type=str,
default='NHWC',
required=False,
help="Data format used to do calculations")
goptim_group.add_argument("--amp",
"--use_tf_amp",
action="store_true",
dest="amp",
default=False,
required=False,
help="Enable Automatic Mixed Precision to speedup computation using tensor cores.")
goptim_group.add_argument("--cpu",
action="store_true",
dest="cpu",
default=False,
required=False,
help="Run model on CPU instead of GPU")
amp_group = self.parser.add_argument_group("Automatic Mixed Precision arguments")
amp_group.add_argument("--static_loss_scale",
"--loss_scale",
default=-1,
required=False,
help="Use static loss scaling in FP32 AMP.")
amp_group.add_argument("--use_static_loss_scaling", required=False, action="store_true", help=argparse.SUPPRESS)
def parse_cmdline(available_arch):
p = argparse.ArgumentParser(description="JoC-RN50v1.5-TF")
p.add_argument('--arch',
choices=available_arch,
type=str,
default='resnet50',
required=False,
help="""Architecture of model to run""")
p.add_argument('--mode',
choices=[
'train', 'train_and_evaluate', 'evaluate', 'predict', 'training_benchmark', 'inference_benchmark'
],
type=str,
default='train_and_evaluate',
required=False,
help="""The execution mode of the script.""")
p.add_argument('--export_dir',
required=False,
default=None,
type=str,
help="Directory in which to write exported SavedModel.")
p.add_argument('--to_predict',
required=False,
default=None,
type=str,
help="Path to file or directory of files to run prediction on.")
p.add_argument('--batch_size', type=int, required=True, help="""Size of each minibatch per GPU.""")
p.add_argument('--num_iter', type=int, required=False, default=1, help="""Number of iterations to run.""")
p.add_argument('--run_iter',
type=int,
required=False,
default=-1,
help="""Number of training iterations to run on single run.""")
p.add_argument('--iter_unit',
choices=['epoch', 'batch'],
type=str,
required=False,
default='epoch',
help="""Unit of iterations.""")
p.add_argument(
'--warmup_steps',
default=50,
type=int,
required=False,
help="""Number of steps considered as warmup and not taken into account for performance measurements.""")
p.add_argument('--model_dir',
type=str,
required=False,
default=None,
help="""Directory in which to write model. If undefined, results dir will be used.""")
p.add_argument('--results_dir',
type=str,
required=False,
default='.',
help="""Directory in which to write training logs, summaries and checkpoints.""")
p.add_argument('--log_filename',
type=str,
required=False,
default='log.json',
help="Name of the JSON file to which write the training log")
p.add_argument('--display_every',
default=10,
type=int,
required=False,
help="""How often (in batches) to print out running information.""")
p.add_argument('--seed', type=int, default=None, help="""Random seed.""")
p.add_argument('--gpu_memory_fraction',
type=float,
default=0.7,
help="""Limit memory fraction used by training script for DALI""")
p.add_argument('--gpu_id',
type=int,
default=0,
help="""Specify ID of the target GPU on multi-device platform. Effective only for single-GPU mode.""")
p.add_argument('--finetune_checkpoint',
required=False,
default=None,
type=str,
help="Path to pre-trained checkpoint which will be used for fine-tuning")
p.add_argument("--use_final_conv",
default=False,
required=False,
action="store_true",
help="Use convolution operator instead of MLP as last layer.")
p.add_argument('--quant_delay',
type=int,
default=0,
required=False,
help="Number of steps to be run before quantization starts to happen")
p.add_argument("--quantize",
default=False,
required=False,
action="store_true",
help="Quantize weights and activations during training. (Defaults to Assymmetric quantization)")
p.add_argument("--use_qdq",
default=False,
required=False,
action="store_true",
help="Use QDQV3 op instead of FakeQuantWithMinMaxVars op for quantization. QDQv3 does only scaling")
p.add_argument("--symmetric",
default=False,
required=False,
action="store_true",
help="Quantize weights and activations during training using symmetric quantization.")
parser_util = ArgumentParserUtil(p)
parser_util.build_data_parser_group()
parser_util.build_training_parser_group()
parser_util.build_generic_optimization_parser_group()
FLAGS, unknown_args = p.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
return FLAGS
|
Kaldi/SpeechRecognition/notebooks | notebooks | README | ```
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
# Kaldi inference demo
## 1. Overview
This folder contains two notebooks demonstrating the steps for carrying out inference with the Kaldi TRTIS backend server using a Python gRPC client.
- [Offline](Kaldi_TRTIS_inference_offline_demo.ipynb): we will stream pre-recorded .wav files to the inference server and receive the results back.
- [Online](Kaldi_TRTIS_inference_online_demo.ipynb): we will stream live audio stream from a microphone to the inference server and receive the results back.
## 2. Quick Start Guide
First, clone the repository:
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/Kaldi/SpeechRecognition
```
Next, build the NVIDIA Kaldi TRTIS container:
```
scripts/docker/build.sh
```
Then download the model and a test data set with:
```
scripts/docker/launch_download.sh
```
Next, launch the TRTIS container with:
```
scripts/docker/launch_server.sh
```
After this step, we should have a TRTIS server ready to serve ASR inference requests.
The next step is to build a TRTIS client container:
```bash
docker build -t kaldi_notebook_client -f Dockerfile.notebook .
```
Start the client container with:
```bash
docker run -it --rm --net=host --device /dev/snd:/dev/snd -v $PWD:/Kaldi kaldi_notebook_client
```
Within the client container, start Jupyter notebook server:
```bash
cd /Kaldi
jupyter notebook --ip=0.0.0.0 --allow-root
```
And navigate a web browser to the IP address or hostname of the host machine
at port `8888`:
```
http://[host machine]:8888
```
Use the token listed in the output from running the `jupyter` command to log
in, for example:
```
http://[host machine]:8888/?token=aae96ae9387cd28151868fee318c3b3581a2d794f3b25c6b
```
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | resnet_v1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None,
use_bounded_activations=False):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
use_bounded_activations: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=tf.nn.relu6 if use_bounded_activations else None,
scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
if use_bounded_activations:
# Use clip_by_value to simulate bandpass activation.
residual = tf.clip_by_value(residual, -6.0, 6.0)
output = tf.nn.relu6(shortcut + residual)
else:
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode. If this is set
to None, the callers can specify slim.batch_norm's is_training parameter
from an outer slim.arg_scope.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with (slim.arg_scope([slim.batch_norm], is_training=is_training)
if is_training is not None else NoOpScope()):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
store_non_strided_activations)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP16_1GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CKPT_DIR=${1:-"/results/SSD320_FP16_1GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_full_1gpus.config"
TENSOR_OPS=0
export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
time python -u ./object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${CKPT_DIR} \
        --alsologtostderr \
--amp \
"${@:3}"
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/test | test | CharacterMapping_test | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "UnitTest.hpp"
#include "characterMapping.h"
using namespace tts;
/******************************************************************************
* UNIT TEST ******************************************************************
*****************************************************************************/
TEST(MapAsciiTest)
{
const std::string text(
"printing, in the only sense with which we are at present concerned, differs "
"from most if not from all the arts and crafts represented in the exhibition in "
"being comparatively modern.");
CharacterMapping cm = CharacterMapping::defaultMapping();
const std::vector<int32_t> sequence = cm.map(text);
const std::vector<int32_t> expSequence{
53, 55, 46, 51, 57, 46, 51, 44, 6 , 11, 46, 51, 11, 57, 45, 42, 11, 52, 51, 49,
62, 11, 56, 42, 51, 56, 42, 11, 60, 46, 57, 45, 11, 60, 45, 46, 40, 45, 11, 60,
42, 11, 38, 55, 42, 11, 38, 57, 11, 53, 55, 42, 56, 42, 51, 57, 11, 40, 52, 51,
40, 42, 55, 51, 42, 41, 6, 11, 41, 46, 43, 43, 42, 55, 56, 11, 43, 55, 52, 50,
11, 50, 52, 56, 57, 11, 46, 43, 11, 51, 52, 57, 11, 43, 55, 52, 50, 11, 38, 49,
49, 11, 57, 45, 42, 11, 38, 55, 57, 56, 11, 38, 51, 41, 11, 40, 55, 38, 43, 57,
56, 11, 55, 42, 53, 55, 42, 56, 42, 51, 57, 42, 41, 11, 46, 51, 11, 57, 45, 42,
11, 42, 61, 45, 46, 39, 46, 57, 46, 52, 51, 11, 46, 51, 11, 39, 42, 46, 51, 44,
11, 40, 52, 50, 53, 38, 55, 38, 57, 46, 59, 42, 49, 62, 11, 50, 52, 41, 42, 55,
51, 7 };
ASSERT_EQ(sequence.size(), expSequence.size());
for (size_t i = 0; i < expSequence.size(); ++i) {
EXPECT_EQ(expSequence[i], sequence[i]);
}
}
TEST(MapArpabetTest)
{
const std::string text("Hello {@AE0}ther {@UW}{@AO}rld.");
CharacterMapping cm = CharacterMapping::defaultMapping();
const std::vector<int32_t> sequence = cm.map(text);
const std::vector<int32_t> expSequence{
45, 42, 49, 49, 52, 11, 69, 57, 45, 42, 55, 11, 139, 76, 55, 49, 41, 7};
ASSERT_EQ(sequence.size(), expSequence.size());
for (size_t i = 0; i < expSequence.size(); ++i) {
EXPECT_EQ(expSequence[i], sequence[i]);
}
}
|
PyTorch/Forecasting/TFT/triton | triton | run_performance_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import logging
import os
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from enum import Enum
from typing import Any, Dict, List
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode
from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.utils import parse_server_url
from .deployment_toolkit.warmup import performance_evaluation_warmup
LOGGER = logging.getLogger("run_performance_on_triton")
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
def _log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
def _calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def _update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = _calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _model_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.")
if batching_mode == BatchingMode.STATIC:
batch_sizes = batch_sizes
concurrency = [number_of_triton_instances]
elif batching_mode == BatchingMode.DYNAMIC:
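        # Derive the concurrency sweep from the theoretical request capacity:
        # two full batches per model instance per Triton instance, capped at 256.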
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step}
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
protocol, host, port = parse_server_url(server_url)
checkpoints = pathlib.Path("./checkpoints")
if checkpoints.is_dir():
shutil.rmtree(checkpoints.as_posix())
checkpoints.mkdir(parents=True, exist_ok=True)
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol,
f"triton_{protocol}_endpoint": f"{host}:{port}",
}
if verbose:
_log_dict("Model Analyzer profiling configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose)
result_path.mkdir(parents=True, exist_ok=True)
for file in checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
filename_model_inference = "metrics-model-inference.csv"
filename_model_gpu = "metrics-model-gpu.csv"
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
_log_dict("Model Analyzer analysis configuration", config)
with open("config.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
model_analyzer = ModelAnalyzer(config=config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {result_path}")
shutil.move(file, result_path / file.name)
def _perf_analyzer_evaluation(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
if batching_mode == BatchingMode.STATIC:
batch_sizes = batch_sizes
max_concurrency = 1
min_concurrency = 1
step = 1
elif batching_mode == BatchingMode.DYNAMIC:
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances
max_concurrency = min(256, max_total_requests)
step = max(1, max_concurrency // concurrency_steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // 256)]
else:
raise ValueError(f"Unsupported batching mode: {batching_mode}")
_log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"number_of_triton_instances": number_of_triton_instances,
"number_of_model_instances": number_of_model_instances,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"concurrency_steps": concurrency_steps,
"batching_mode": batching_mode,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"verbose": verbose,
},
)
results: List[Dict] = list()
for batch_size in batch_sizes:
for concurrency in range(min_concurrency, max_concurrency + step, step):
performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
params = {
"model-name": model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{host}:{port}",
"protocol": protocol,
"input-data": input_data,
"measurement-interval": measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = measurement_mode.value
params["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = offline_mode.value
params["output-shared-memory-size"] = output_shared_memory_size
if verbose:
_log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()
_update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path.as_posix(), data=results)
show_results(results=results)
def _run_performance_analysis(
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
number_of_triton_instances: int,
number_of_model_instances: int,
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
concurrency_steps: int,
batching_mode: BatchingMode,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
performance_tool: PerformanceTool,
model_repository: str,
result_path: pathlib.Path,
warmup: bool,
verbose: bool,
):
log_level = logging.INFO if not verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
if result_path.suffix:
raise ValueError(
"Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results"
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
if warmup:
LOGGER.info("Running warmup before the main test")
performance_evaluation_warmup(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
LOGGER.info("Using Model Analyzer for performance evaluation")
_model_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
model_repository=model_repository,
result_path=result_path,
verbose=verbose,
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
LOGGER.info("Using Perf Analyzer for performance evaluation")
_perf_analyzer_evaluation(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
number_of_triton_instances=number_of_triton_instances,
number_of_model_instances=number_of_model_instances,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency_steps=concurrency_steps,
batching_mode=batching_mode,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
verbose=verbose,
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--server-url",
type=str,
required=False,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--input-data",
type=str,
required=False,
default="random",
help="Input data to perform profiling.",
)
parser.add_argument(
"--input-shapes",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument(
"--batch-sizes",
type=str,
required=True,
help="List of batch sizes to tests. Comma separated.",
)
parser.add_argument(
"--number-of-triton-instances",
type=int,
default=1,
help="Number of Triton Server instances",
)
parser.add_argument(
"--number-of-model-instances",
type=int,
default=1,
help="Number of models instances on Triton Server",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
required=False,
help="Time window perf_analyzer will wait to stabilize the measurement",
default=5000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
required=False,
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=50,
type=int,
)
parser.add_argument(
"--concurrency-steps",
help="Define number of concurrency steps used for dynamic batching tests",
default=32,
type=int,
)
parser.add_argument(
"--batching-mode",
choices=[item.value for item in BatchingMode],
default=BatchingMode.STATIC.value,
type=str,
help="Select batching mode "
"'static' run static batching scenario. "
"'dynamic' run dynamic batching scenario.",
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=100240,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--performance-tool",
choices=[item.value for item in PerformanceTool],
default=PerformanceTool.MODEL_ANALYZER.value,
type=str,
help="Select performance tool for measurement mode "
"'model_analyzer' use Model Analyzer "
"'perf_analyzer' use Perf Analyzer",
)
parser.add_argument(
"--model-repository",
default=None,
type=str,
help="Path to model repository. Valid when using Model Analyzer",
)
parser.add_argument("--result-path", type=pathlib.Path, required=True, help="Path where results files is stored.")
parser.add_argument(
"--warmup", help="Enable model warmup before performance test", action="store_true", default=False
)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args = parser.parse_args()
batch_sizes = list(map(lambda x: int(x), args.batch_sizes.split(",")))
_run_performance_analysis(
server_url=args.server_url,
model_name=args.model_name,
input_data=args.input_data,
input_shapes=args.input_shapes or [],
batch_sizes=batch_sizes,
number_of_triton_instances=args.number_of_triton_instances,
number_of_model_instances=args.number_of_model_instances,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency_steps=args.concurrency_steps,
batching_mode=BatchingMode(args.batching_mode),
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
performance_tool=PerformanceTool(args.performance_tool),
model_repository=args.model_repository,
result_path=args.result_path,
warmup=args.warmup,
verbose=args.verbose,
)
if __name__ == "__main__":
main()
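# Illustrative invocation (assumed module path; model name and values are placeholders):
#   python -m triton.run_performance_on_triton \
#       --model-name tft \
#       --batch-sizes 1,8,16 \
#       --performance-tool perf_analyzer \
#       --result-path results.csv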
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/bin | bin | build_waveglow | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cudaUtils.h"
#include "engineCache.h"
#include "logging.h"
#include "waveGlowBuilder.h"
#include "NvInfer.h"
#include <iostream>
#include <memory>
using namespace nvinfer1;
using namespace tts;
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
bool matches(const std::string& arg, const std::string& flag)
{
return arg.length() >= flag.length() && arg.substr(0, flag.length()) == flag;
}
int parseNumFlag(
const int argc, const char** argv, const std::string& flag, int* i)
{
int value;
const std::string arg(argv[*i]);
if (arg.length() > flag.length()) {
value = std::stol(arg.substr(flag.length()));
} else if (*i + 1 < argc) {
++(*i);
value = std::stol(argv[*i]);
} else {
throw std::runtime_error("Missing argument for '" + flag + "'.");
}
return value;
}
int parseAmpFlag(
const int argc, const char** argv, const std::string& flag, int* i)
{
std::string str;
const std::string arg(argv[*i]);
if (arg.length() > flag.length()) {
str = arg.substr(flag.length());
} else if (*i + 1 < argc) {
++(*i);
str = argv[*i];
} else {
throw std::runtime_error("Missing argument for '" + flag + "'.");
}
int value;
if (str == "fp32") {
value = 0;
} else if (str == "amp") {
value = 1;
} else {
throw std::runtime_error(
"Invalid argument for precision (amp|fp32): " + str);
}
return value;
}
void usage(const std::string& binName)
{
std::cerr << "usage: " << std::endl;
std::cerr << " " << binName << " <model file> <engine file> [options]\n";
std::cerr << "options:" << std::endl;
std::cerr << " -B<batch size>" << std::endl;
std::cerr << " -F<precision (fp32|amp)>" << std::endl;
std::cerr << " -h" << std::endl;
}
void parseArgs(
const int argc,
const char** const argv,
std::string* model,
std::string* enginePath,
int* batchSize,
int* useAMP)
{
bool modelSet = false;
bool enginePathSet = false;
for (int i = 1; i < argc; ++i) {
const std::string arg(argv[i]);
if (matches(arg, "-B")) {
*batchSize = parseNumFlag(argc, argv, "-B", &i);
} else if (matches(arg, "-F")) {
*useAMP = parseAmpFlag(argc, argv, "-F", &i);
} else if (matches(arg, "-h")) {
usage(argv[0]);
exit(0);
} else {
if (!modelSet) {
*model = arg;
modelSet = true;
} else if (!enginePathSet) {
*enginePath = arg;
enginePathSet = true;
} else {
throw std::runtime_error("Unknown extra argument '" + arg + "'.");
}
}
}
}
/******************************************************************************
* MAIN ***********************************************************************
*****************************************************************************/
int main(int argc, const char* argv[])
{
std::string waveglowModelPath;
std::string enginePath;
int batchSize = 1;
int useFP16 = true;
parseArgs(argc, argv, &waveglowModelPath, &enginePath, &batchSize, &useFP16);
if (waveglowModelPath.empty() || enginePath.empty()) {
usage(argv[0]);
return 1;
}
CudaUtils::printDeviceInformation();
try {
std::shared_ptr<Logger> logger(new Logger(ILogger::Severity::kERROR));
TRTPtr<IBuilder> builder(createInferBuilder(*logger));
EngineCache cache(logger);
WaveGlowBuilder waveglowBuilder(waveglowModelPath, logger);
const TRTPtr<ICudaEngine> wgEng
= waveglowBuilder.build(*builder, batchSize, useFP16);
cache.save(*wgEng, enginePath);
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
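// Illustrative invocation (paths are placeholders): build a WaveGlow engine for
// batch size 1 with mixed precision enabled.
//   ./build_waveglow waveglow.onnx waveglow.eng -B1 -Famp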
|
PyTorch/Recommendation/DLRM/dlrm/scripts | scripts | prepare_synthetic_dataset | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from dlrm.data.datasets import SyntheticDataset
from dlrm.data.utils import write_dataset_to_disk
from dlrm.data.feature_spec import FeatureSpec
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("synthetic_dataset_num_entries",
default=int(32768 * 1024), # 1024 batches for single-GPU training by default
help="Number of samples per epoch for the synthetic dataset")
flags.DEFINE_integer("num_numerical_features", default=13,
help="Number of numerical features in the dataset. Defaults to 13 for the Criteo Terabyte Dataset")
flags.DEFINE_list("synthetic_dataset_table_sizes", default=','.join(26 * [str(10 ** 5)]),
help="Cardinality of each categorical feature")
flags.DEFINE_string("feature_spec", default=None,
help="Feature specification file describing the desired dataset."
"Only feature_spec and channel_spec sections are required and used."
"Overrides num_numerical_features and synthetic_dataset_table_sizes")
flags.DEFINE_string("synthetic_dataset_dir", default="/tmp/dlrm_synthetic_data",
help="Destination of the saved synthetic dataset")
flags.DEFINE_integer("seed", default=12345, help="Set a seed for generating synthetic data")
def main(argv):
torch.manual_seed(FLAGS.seed)
number_of_entries = FLAGS.synthetic_dataset_num_entries
if FLAGS.feature_spec is not None:
fspec = FeatureSpec.from_yaml(FLAGS.feature_spec)
else:
cardinalities = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]
fspec = FeatureSpec.get_default_feature_spec(number_of_numerical_features=FLAGS.num_numerical_features,
categorical_feature_cardinalities=cardinalities)
fspec.base_directory = FLAGS.synthetic_dataset_dir
fspec.check_feature_spec()
number_of_numerical_features = fspec.get_number_of_numerical_features()
categorical_feature_sizes = fspec.get_categorical_sizes()
train_dataset = SyntheticDataset(
num_entries=number_of_entries,
numerical_features=number_of_numerical_features,
categorical_feature_sizes=categorical_feature_sizes
)
test_dataset = SyntheticDataset(
num_entries=number_of_entries,
numerical_features=number_of_numerical_features,
categorical_feature_sizes=categorical_feature_sizes
)
write_dataset_to_disk(
dataset_train=train_dataset,
dataset_test=test_dataset,
feature_spec=fspec
)
if __name__ == '__main__':
app.run(main)
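# Illustrative invocation (assumed module path; paths and sizes are placeholders):
#   python -m dlrm.scripts.prepare_synthetic_dataset \
#       --synthetic_dataset_dir /data/dlrm_synthetic \
#       --synthetic_dataset_num_entries 1048576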
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | decoderBuilderPlugins | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "decoderBuilderPlugins.h"
#include "decoderInstance.h"
#include "dims5.h"
#include "engineCache.h"
#include "pluginBuilder.h"
#include "trtUtils.h"
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const INPUT_DROPOUT_NAME = DecoderInstance::INPUT_DROPOUT_NAME;
constexpr const char* const INPUT_LASTFRAME_NAME = DecoderInstance::INPUT_LASTFRAME_NAME;
constexpr const char* const INPUT_MEMORY_NAME = DecoderInstance::INPUT_MEMORY_NAME;
constexpr const char* const INPUT_PROCESSED_NAME = DecoderInstance::INPUT_PROCESSED_NAME;
constexpr const char* const INPUT_WEIGHTS_NAME = DecoderInstance::INPUT_WEIGHTS_NAME;
constexpr const char* const INPUT_CONTEXT_NAME = DecoderInstance::INPUT_CONTEXT_NAME;
constexpr const char* const INPUT_ATTENTIONHIDDEN_NAME = DecoderInstance::INPUT_ATTENTIONHIDDEN_NAME;
constexpr const char* const INPUT_ATTENTIONCELL_NAME = DecoderInstance::INPUT_ATTENTIONCELL_NAME;
constexpr const char* const INPUT_DECODERHIDDEN_NAME = DecoderInstance::INPUT_DECODERHIDDEN_NAME;
constexpr const char* const INPUT_DECODERCELL_NAME = DecoderInstance::INPUT_DECODERCELL_NAME;
constexpr const char* const OUTPUT_ATTENTIONHIDDEN_NAME = DecoderInstance::OUTPUT_ATTENTIONHIDDEN_NAME;
constexpr const char* const OUTPUT_ATTENTIONCELL_NAME = DecoderInstance::OUTPUT_ATTENTIONCELL_NAME;
constexpr const char* const OUTPUT_CONTEXT_NAME = DecoderInstance::OUTPUT_CONTEXT_NAME;
constexpr const char* const OUTPUT_WEIGHTS_NAME = DecoderInstance::OUTPUT_WEIGHTS_NAME;
constexpr const char* const OUTPUT_DECODERHIDDEN_NAME = DecoderInstance::OUTPUT_DECODERHIDDEN_NAME;
constexpr const char* const OUTPUT_DECODERCELL_NAME = DecoderInstance::OUTPUT_DECODERCELL_NAME;
constexpr const char* const OUTPUT_CHANNELS_NAME = DecoderInstance::OUTPUT_CHANNELS_NAME;
constexpr const char* const OUTPUT_GATE_NAME = DecoderInstance::OUTPUT_GATE_NAME;
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
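// Fill in the min/opt/max shapes for an input with two dynamic dimensions
// (batch and sequence length) on the given optimization profile.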
void configureDims(const INetworkDefinition* const network, IOptimizationProfile* optProfile,
const std::string& inputName, const int maxBatchSize, const int minInputLength, const int maxInputLength,
const int optInputLength)
{
for (int inputIdx = 0; inputIdx < network->getNbInputs(); ++inputIdx)
{
const ITensor* const input = network->getInput(inputIdx);
if (std::string(input->getName()) == inputName)
{
const Dims defDims = input->getDimensions();
Dims maxDims = defDims;
Dims minDims = defDims;
Dims optDims = defDims;
bool foundBatch = false;
bool foundLength = false;
for (int d = 0; d < defDims.nbDims; ++d)
{
if (defDims.d[d] == -1)
{
if (!foundBatch)
{
maxDims.d[d] = maxBatchSize;
minDims.d[d] = 1;
optDims.d[d] = 1;
foundBatch = true;
}
else if (!foundLength)
{
maxDims.d[d] = maxInputLength;
minDims.d[d] = minInputLength;
optDims.d[d] = optInputLength;
foundLength = true;
}
else
{
throw std::runtime_error("Unknown third dynamic dimension: " + std::to_string(d));
}
}
}
if (!foundBatch || !foundLength)
{
throw std::runtime_error("Failed to find all dynamic dimensions");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kMIN, minDims))
{
throw std::runtime_error("Failed to set minimum dimensions of " + TRTUtils::dimsToString(minDims)
+ " for " + inputName + ".");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kMAX, maxDims))
{
throw std::runtime_error("Failed to set maximum dimensions of " + TRTUtils::dimsToString(maxDims)
+ " for " + inputName + ".");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kOPT, optDims))
{
throw std::runtime_error("Failed to set optimal dimensions of " + TRTUtils::dimsToString(optDims)
+ " for " + inputName + ".");
}
// success
return;
}
}
throw std::runtime_error("Unable to find input: '" + inputName + "'.");
}
void configureDefaultDims(const INetworkDefinition* const network, IOptimizationProfile* optProfile,
const std::string& inputName, const int maxBatchSize)
{
for (int inputIdx = 0; inputIdx < network->getNbInputs(); ++inputIdx)
{
const ITensor* const input = network->getInput(inputIdx);
if (std::string(input->getName()) == inputName)
{
const Dims defDims = input->getDimensions();
Dims maxDims = defDims;
Dims minDims = defDims;
Dims optDims = defDims;
bool foundBatch = false;
for (int d = 0; d < defDims.nbDims; ++d)
{
if (defDims.d[d] == -1)
{
if (!foundBatch)
{
maxDims.d[d] = maxBatchSize;
minDims.d[d] = 1;
optDims.d[d] = 1;
foundBatch = true;
}
else
{
throw std::runtime_error(
"Unknown second dynamic dimension for " + inputName + ": " + std::to_string(d));
}
}
}
if (!foundBatch)
{
throw std::runtime_error("Failed to find all dynamic dimensions");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kMIN, minDims))
{
throw std::runtime_error("Failed to set minimum dimensions of " + TRTUtils::dimsToString(minDims)
+ " for " + inputName + ".");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kMAX, maxDims))
{
throw std::runtime_error("Failed to set maximum dimensions of " + TRTUtils::dimsToString(maxDims)
+ " for " + inputName + ".");
}
if (!optProfile->setDimensions(inputName.c_str(), OptProfileSelector::kOPT, optDims))
{
throw std::runtime_error("Failed to set optimal dimensions of " + TRTUtils::dimsToString(optDims)
+ " for " + inputName + ".");
}
// success
return;
}
}
throw std::runtime_error("Unable to find input: '" + inputName + "'.");
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
DecoderBuilderPlugins::DecoderBuilderPlugins(const int numDim, const int numChannels)
: mNumEncodingDim(numDim)
, mNumPrenetDim(256)
, mNumAttentionRNNDim(1024)
, mNumAttentionDim(128)
, mNumAttentionFilters(32)
, mAttentionKernelSize(31)
, mNumLSTMDim(1024)
, mNumChannels(numChannels)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
TRTPtr<ICudaEngine> DecoderBuilderPlugins::build(
IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const int minInputLength,
const int maxInputLength,
const bool useFP16)
{
if (maxBatchSize > 1)
{
throw std::runtime_error(
"DecoderBuilderPlugins only supports batch size of 1: " + std::to_string(maxBatchSize));
}
TRTPtr<INetworkDefinition> network(builder.createNetworkV2(
1U << static_cast<int>(
NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
network->setName("Tacotron2_DecoderWithPlugins");
// PRENET ///////////////////////////////////////////////////////////////////
ITensor* prenetInput = network->addInput(INPUT_LASTFRAME_NAME, DataType::kFLOAT, Dims4{-1, mNumChannels + 1, 1, 1});
ITensor* dropoutInput = network->addInput(INPUT_DROPOUT_NAME, DataType::kFLOAT, Dims4{-1, mNumPrenetDim, 1, 1});
const LayerData* const prenetData1 = importer.getWeights({"decoder", "prenet", "layers", "0", "linear_layer"});
const LayerData* const prenetData2 = importer.getWeights({"decoder", "prenet", "layers", "1", "linear_layer"});
PluginBuilder prenetBuilder("Taco2Prenet", "0.1.0");
prenetBuilder.setField("InputLength", mNumChannels);
prenetBuilder.setField("Dimension", mNumPrenetDim);
prenetBuilder.setField("weight1", prenetData1->get("weight"));
prenetBuilder.setField("weight2", prenetData2->get("weight"));
TRTPtr<IPluginV2> prenet = prenetBuilder.make("decoder.prenet");
std::vector<ITensor*> prenetInputs{prenetInput, dropoutInput};
ILayer* const prenetLayer
= network->addPluginV2(prenetInputs.data(), static_cast<int>(prenetInputs.size()), *prenet);
prenetLayer->setName("decoder.prenet");
ITensor* const prenetOutput = prenetLayer->getOutput(0);
// ATTENTION LSTM ///////////////////////////////////////////////////////////
ITensor* const attentionContextInput
= network->addInput(INPUT_CONTEXT_NAME, DataType::kFLOAT, Dims3{-1, 1, mNumEncodingDim});
ITensor* const attentionRNNHidden
= network->addInput(INPUT_ATTENTIONHIDDEN_NAME, DataType::kFLOAT, Dims3{-1, 1, mNumAttentionRNNDim});
ITensor* const attentionRNNCell
= network->addInput(INPUT_ATTENTIONCELL_NAME, DataType::kFLOAT, Dims3{-1, 1, mNumAttentionRNNDim});
const LayerData* const lstmData = importer.getWeights({"decoder", "attention_rnn"});
std::vector<ITensor*> attentionLSTMInputs{
prenetOutput, attentionContextInput, attentionRNNHidden, attentionRNNCell};
PluginBuilder attLSTMCellBuilder("Taco2LSTMCell", "0.1.0");
attLSTMCellBuilder.setField("Length",
static_cast<int32_t>(
TRTUtils::getTensorSize(*attentionLSTMInputs[0]) + TRTUtils::getTensorSize(*attentionLSTMInputs[1])));
attLSTMCellBuilder.setField("Dimension", mNumAttentionRNNDim);
attLSTMCellBuilder.setField("FP16", static_cast<int32_t>(useFP16));
attLSTMCellBuilder.setField("weight_ih", lstmData->get("weight_ih"));
attLSTMCellBuilder.setField("weight_hh", lstmData->get("weight_hh"));
attLSTMCellBuilder.setField("bias_ih", lstmData->get("bias_ih"));
attLSTMCellBuilder.setField("bias_hh", lstmData->get("bias_hh"));
TRTPtr<IPluginV2> attentionLSTM
= attLSTMCellBuilder.make("decoder.attention_rnn");
ILayer* const attentionLSTMLayer = network->addPluginV2(
attentionLSTMInputs.data(), static_cast<int>(attentionLSTMInputs.size()), *attentionLSTM);
ITensor* const attentionHiddenOut = attentionLSTMLayer->getOutput(0);
ITensor* const attentionCellOut = attentionLSTMLayer->getOutput(1);
attentionLSTMLayer->setName("decoder.attention_rnn");
attentionHiddenOut->setName(OUTPUT_ATTENTIONHIDDEN_NAME);
network->markOutput(*attentionHiddenOut);
attentionCellOut->setName(OUTPUT_ATTENTIONCELL_NAME);
network->markOutput(*attentionCellOut);
// ATTENTION ////////////////////////////////////////////////////////////////
ITensor* const inputMemory = network->addInput(INPUT_MEMORY_NAME, DataType::kFLOAT, Dims3(-1, -1, mNumEncodingDim));
ITensor* const inputProcessedMemory
= network->addInput(INPUT_PROCESSED_NAME, DataType::kFLOAT, Dims5(-1, -1, mNumAttentionDim, 1, 1));
ITensor* const inputWeights = network->addInput(INPUT_WEIGHTS_NAME, DataType::kFLOAT, Dims4(-1, 2, -1, 1));
const LayerData* const queryData
= importer.getWeights({"decoder", "attention_layer", "query_layer", "linear_layer"});
const LayerData* const locationConvData
= importer.getWeights({"decoder", "attention_layer", "location_layer", "location_conv", "conv"});
const LayerData* const locationLinearData
= importer.getWeights({"decoder", "attention_layer", "location_layer", "location_dense", "linear_layer"});
const LayerData* const energyData = importer.getWeights({"decoder", "attention_layer", "v", "linear_layer"});
std::vector<ITensor*> attentionInputs{inputMemory, inputProcessedMemory, inputWeights, attentionHiddenOut};
PluginBuilder attBuilder("Taco2Attention", "0.1.0");
attBuilder.setField("EncodingDimension", mNumEncodingDim);
attBuilder.setField("QueryDimension", mNumAttentionRNNDim);
attBuilder.setField("NumFilters", mNumAttentionFilters);
attBuilder.setField("ConvKernelSize", mAttentionKernelSize);
attBuilder.setField("AttentionDimension", mNumAttentionDim);
attBuilder.setField("QueryWeight", queryData->get("weight"));
attBuilder.setField("ConvWeight", locationConvData->get("weight"));
attBuilder.setField("LocationWeight", locationLinearData->get("weight"));
attBuilder.setField("EnergyWeight", energyData->get("weight"));
TRTPtr<IPluginV2> attention = attBuilder.make("decoder.attention_layer");
ILayer* const attentionLayer
= network->addPluginV2(attentionInputs.data(), static_cast<int>(attentionInputs.size()), *attention);
attentionLayer->setName("decoder.attention_layer");
ITensor* const attentionContextOutput = attentionLayer->getOutput(0);
ITensor* const attentionWeightOutput = attentionLayer->getOutput(1);
attentionWeightOutput->setName(OUTPUT_WEIGHTS_NAME);
network->markOutput(*attentionWeightOutput);
attentionContextOutput->setName(OUTPUT_CONTEXT_NAME);
network->markOutput(*attentionContextOutput);
// DECODER LSTM /////////////////////////////////////////////////////////////
ITensor* const inputDecoderHidden
= network->addInput(INPUT_DECODERHIDDEN_NAME, DataType::kFLOAT, Dims3{-1, 1, mNumLSTMDim});
ITensor* const inputDecoderCell
= network->addInput(INPUT_DECODERCELL_NAME, DataType::kFLOAT, Dims3{-1, 1, mNumLSTMDim});
const LayerData* const decoderLSTMData = importer.getWeights({"decoder", "decoder_rnn"});
std::vector<ITensor*> decoderLSTMInputs{
attentionHiddenOut, attentionContextOutput, inputDecoderHidden, inputDecoderCell};
PluginBuilder decoderLSTMCellBuilder("Taco2LSTMCell", "0.1.0");
decoderLSTMCellBuilder.setField("Length",
static_cast<int32_t>(
TRTUtils::getTensorSize(*decoderLSTMInputs[0]) + TRTUtils::getTensorSize(*decoderLSTMInputs[1])));
decoderLSTMCellBuilder.setField("Dimension", mNumLSTMDim);
decoderLSTMCellBuilder.setField("FP16", static_cast<int32_t>(useFP16));
decoderLSTMCellBuilder.setField("weight_ih", decoderLSTMData->get("weight_ih"));
decoderLSTMCellBuilder.setField("weight_hh", decoderLSTMData->get("weight_hh"));
decoderLSTMCellBuilder.setField("bias_ih", decoderLSTMData->get("bias_ih"));
decoderLSTMCellBuilder.setField("bias_hh", decoderLSTMData->get("bias_hh"));
TRTPtr<IPluginV2> decoderLSTM
= decoderLSTMCellBuilder.make("decoder.decoder_rnn");
ILayer* const decoderLSTMLayer
= network->addPluginV2(decoderLSTMInputs.data(), static_cast<int>(decoderLSTMInputs.size()), *decoderLSTM);
decoderLSTMLayer->setName("decoder.decoder_rnn");
ITensor* const decoderHiddenOut = decoderLSTMLayer->getOutput(0);
ITensor* const decoderCellOut = decoderLSTMLayer->getOutput(1);
decoderHiddenOut->setName(OUTPUT_DECODERHIDDEN_NAME);
network->markOutput(*decoderHiddenOut);
decoderCellOut->setName(OUTPUT_DECODERCELL_NAME);
network->markOutput(*decoderCellOut);
// PROJECTION ///////////////////////////////////////////////////////////////
const LayerData* const channelData = importer.getWeights({"decoder", "linear_projection", "linear_layer"});
const LayerData* const gateData = importer.getWeights({"decoder", "gate_layer", "linear_layer"});
PluginBuilder projBuilder("Taco2Projection", "0.1.0");
projBuilder.setField("HiddenInputLength", static_cast<int32_t>(TRTUtils::getTensorSize(*decoderHiddenOut)));
projBuilder.setField("ContextInputLength", static_cast<int32_t>(TRTUtils::getTensorSize(*attentionContextOutput)));
projBuilder.setField("ChannelDimension", mNumChannels);
projBuilder.setField("GateDimension", 1);
projBuilder.setField("ChannelWeights", channelData->get("weight"));
projBuilder.setField("GateWeights", gateData->get("weight"));
projBuilder.setField("ChannelBias", channelData->get("bias"));
projBuilder.setField("GateBias", gateData->get("bias"));
TRTPtr<IPluginV2> proj
= projBuilder.make("decoder.linear_projection.linear_layer");
std::vector<ITensor*> projInputs{decoderHiddenOut, attentionContextOutput};
ILayer* const projLayer = network->addPluginV2(projInputs.data(), static_cast<int>(projInputs.size()), *proj);
projLayer->setName("decoder.linear_projection.linear_layer");
ITensor* const outputChannels = projLayer->getOutput(0);
outputChannels->setName(OUTPUT_CHANNELS_NAME);
network->markOutput(*outputChannels);
TRTPtr<IBuilderConfig> config(builder.createBuilderConfig());
config->setMaxWorkspaceSize(1ULL << 29); // 512 MB
if (useFP16)
{
config->setFlag(BuilderFlag::kFP16);
}
builder.setMaxBatchSize(maxBatchSize);
IOptimizationProfile* const optProfile = builder.createOptimizationProfile();
    // the optimum input length should actually matter, so we'll just take
    // the average of the min and max lengths
const int optInputLength = (minInputLength + maxInputLength) / 2;
// memory dimensions
configureDims(
network.get(), optProfile, INPUT_MEMORY_NAME, maxBatchSize, minInputLength, maxInputLength, optInputLength);
// processed memory dimensions
configureDims(
network.get(), optProfile, INPUT_PROCESSED_NAME, maxBatchSize, minInputLength, maxInputLength, optInputLength);
// weights dimensions
configureDims(
network.get(), optProfile, INPUT_WEIGHTS_NAME, maxBatchSize, minInputLength, maxInputLength, optInputLength);
// set the batch dimension on the rest
configureDefaultDims(network.get(), optProfile, INPUT_DROPOUT_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_LASTFRAME_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_CONTEXT_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_ATTENTIONHIDDEN_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_ATTENTIONCELL_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_DECODERHIDDEN_NAME, maxBatchSize);
configureDefaultDims(network.get(), optProfile, INPUT_DECODERCELL_NAME, maxBatchSize);
config->addOptimizationProfile(optProfile);
TRTPtr<ICudaEngine> engine(
builder.buildEngineWithConfig(*network, *config));
if (!engine)
{
throw std::runtime_error("Failed to build Tacotron2::DecoderPlugins engine.");
}
return engine;
}
} // namespace tts
|
PyTorch/Translation/Transformer/fairseq | fairseq | multiprocessing_pdb | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import multiprocessing
import os
import pdb
import sys
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
_stdin_fd = sys.stdin.fileno()
_stdin = None
_stdin_lock = multiprocessing.Lock()
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with self._stdin_lock:
try:
if not self._stdin:
self._stdin = os.fdopen(self._stdin_fd)
sys.stdin = self._stdin
self.cmdloop()
finally:
sys.stdin = stdin_bak
pdb = MultiprocessingPdb()
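# Illustrative usage (mirrors the class docstring): safe to call from worker processes,
# since access to the shared stdin is serialized by the lock above.
#   from fairseq import pdb
#   pdb.set_trace()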
|
TensorFlow2/LanguageModeling/ELECTRA | ELECTRA | configuration_utils | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import logging
import os
from typing import Dict, Optional, Tuple
from utils import log
from file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
logger = logging.getLogger(__name__)
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes):
- ``pretrained_config_archive_map``: a python ``dict`` with `shortcut names` (string) as keys and `url` (string) of associated pretrained model configurations as values.
- ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.
num_labels (:obj:`int`, `optional`, defaults to `2`):
Number of classes to use when the model is a classification model (sequences/tokens)
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return attention weights.
        output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model should return all hidden states.
        torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether the model is used with TorchScript (for PyTorch models).
"""
pretrained_config_archive_map = {} # type: Dict[str, str]
model_type = "" # type: str
def __init__(self, **kwargs):
# Attributes with defaults
self.output_attentions = kwargs.pop("output_attentions", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_past = kwargs.pop("output_past", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
        # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.num_labels = kwargs.pop("num_labels", 2)
self.id2label = kwargs.pop("id2label", {i: "LABEL_{}".format(i) for i in range(self.num_labels)})
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
self.label2id = kwargs.pop("label2id", dict(zip(self.id2label.values(), self.id2label.keys())))
self.label2id = dict((key, int(value)) for key, value in self.label2id.items())
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
log("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def num_labels(self):
return self._num_labels
@num_labels.setter
def num_labels(self, num_labels):
self._num_labels = num_labels
self.id2label = {i: "LABEL_{}".format(i) for i in range(self.num_labels)}
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
self.label2id = dict((key, int(value)) for key, value in self.label2id.items())
def save_pretrained(self, save_directory):
"""
Save a configuration object to the directory `save_directory`, so that it
can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`string`):
Directory where the configuration JSON file will be saved.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
log("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.
Args:
pretrained_model_name_or_path (:obj:`string`):
either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache or
download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to
our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.:
``./my_model_directory/configuration.json``.
cache_dir (:obj:`string`, `optional`):
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
kwargs (:obj:`Dict[str, any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
controlled by the `return_unused_kwargs` keyword parameter.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
proxies (:obj:`Dict`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.:
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`
The proxies are used on each request.
return_unused_kwargs: (`optional`) bool:
If False, then this function returns just the final configuration object.
                If True, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a
                dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e. the part
of kwargs which has not been used to update `config` and is otherwise ignored.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
assert config.output_attention == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
foo=False, return_unused_kwargs=True)
assert config.output_attention == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: str, pretrained_config_archive_map: Optional[Dict] = None, **kwargs
) -> Tuple[Dict, Dict]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used
for instantiating a Config using `from_dict`.
Parameters:
pretrained_model_name_or_path (:obj:`string`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
pretrained_config_archive_map: (:obj:`Dict[str, str]`, `optional`) Dict:
A map of `shortcut names` to `url`. By default, will use the current class attribute.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
if pretrained_config_archive_map is None:
pretrained_config_archive_map = cls.pretrained_config_archive_map
if pretrained_model_name_or_path in pretrained_config_archive_map:
config_file = pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError:
if pretrained_model_name_or_path in pretrained_config_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file
)
else:
msg = (
"Can't load '{}'. Make sure that:\n\n"
"- '{}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
"- or '{}' is the correct path to a directory containing a '{}' file\n\n".format(
pretrained_model_name_or_path,
pretrained_model_name_or_path,
pretrained_model_name_or_path,
CONFIG_NAME,
)
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
"Couldn't reach server at '{}' to download configuration file or "
"configuration file is not a valid JSON file. "
"Please check network or file content here: {}.".format(config_file, resolved_config_file)
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
log("loading configuration file {}".format(config_file))
else:
log("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict, **kwargs) -> "PretrainedConfig":
"""
Constructs a `Config` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved
from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`
method.
kwargs (:obj:`Dict[str, any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
# log("Model config {}".format(str(config)))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: str) -> "PretrainedConfig":
"""
Constructs a `Config` from the path to a json file of parameters.
Args:
json_file (:obj:`string`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: An instance of a configuration object
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: str):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_dict(self):
"""
Serializes this instance to a Python dictionary.
Returns:
            :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self):
"""
Serializes this instance to a JSON string.
Returns:
:obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
"""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
"""
Save this instance to a json file.
Args:
json_file_path (:obj:`string`):
Path to the JSON file in which this configuration instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
def update(self, config_dict: Dict):
"""
Updates attributes of this class
with attributes from `config_dict`.
Args:
:obj:`Dict[str, any]`: Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
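# Usage sketch (illustrative only; "./my_config_dir" is an assumed, pre-existing directory,
# and BertConfig is the derived class defined later in this file):
#
#   config = BertConfig(num_hidden_layers=6)
#   config.save_pretrained("./my_config_dir")              # writes <dir>/config.json
#   reloaded = BertConfig.from_pretrained("./my_config_dir")
#   print(reloaded.num_hidden_layers)                      # 6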
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
"bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
"bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
"bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
"bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
"bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
"bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
"bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
"bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
"bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
"bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
"bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
"bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-config.json",
"bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-config.json",
"bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-config.json",
"bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-config.json",
"bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
"bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
"bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
}
class BertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
        It is used to instantiate a BERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30522):
                Vocabulary size of the BERT model. Defines the number of different tokens that
                can be represented by the `input_ids` passed to the forward method of :class:`~transformers.BertModel`.
hidden_size (:obj:`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
                The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
Example::
from transformers import BertModel, BertConfig
# Initializing a BERT bert-base-uncased style configuration
configuration = BertConfig()
# Initializing a model from the bert-base-uncased style configuration
model = BertModel(configuration)
# Accessing the model configuration
configuration = model.config
Attributes:
pretrained_config_archive_map (Dict[str, str]):
A dictionary containing all the available pre-trained checkpoints.
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps |
PyTorch/LanguageModeling/BERT | BERT | create_pretraining_data | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import h5py
import numpy as np
from tqdm import tqdm, trange
from tokenization import BertTokenizer
import tokenization as tokenization
import random
import collections
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_file):
"""Create TF example files from `TrainingInstance`s."""
total_written = 0
features = collections.OrderedDict()
num_instances = len(instances)
features["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for inst_index, instance in enumerate(tqdm(instances)):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features["input_ids"][inst_index] = input_ids
features["input_mask"][inst_index] = input_mask
features["segment_ids"][inst_index] = segment_ids
features["masked_lm_positions"][inst_index] = masked_lm_positions
features["masked_lm_ids"][inst_index] = masked_lm_ids
features["next_sentence_labels"][inst_index] = next_sentence_label
total_written += 1
# if inst_index < 20:
# tf.logging.info("*** Example ***")
# tf.logging.info("tokens: %s" % " ".join(
# [tokenization.printable_text(x) for x in instance.tokens]))
# for feature_name in features.keys():
# feature = features[feature_name]
# values = []
# if feature.int64_list.value:
# values = feature.int64_list.value
# elif feature.float_list.value:
# values = feature.float_list.value
# tf.logging.info(
# "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
print("saving data")
  f = h5py.File(output_file, 'w')
f.create_dataset("input_ids", data=features["input_ids"], dtype='i4', compression='gzip')
f.create_dataset("input_mask", data=features["input_mask"], dtype='i1', compression='gzip')
f.create_dataset("segment_ids", data=features["segment_ids"], dtype='i1', compression='gzip')
f.create_dataset("masked_lm_positions", data=features["masked_lm_positions"], dtype='i4', compression='gzip')
f.create_dataset("masked_lm_ids", data=features["masked_lm_ids"], dtype='i4', compression='gzip')
f.create_dataset("next_sentence_labels", data=features["next_sentence_labels"], dtype='i1', compression='gzip')
f.flush()
f.close()
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
print("creating instance from {}".format(input_file))
with open(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
          # If the picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
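# Worked example (illustrative): for the 6-token sequence
#   ["[CLS]", "my", "dog", "is", "cute", "[SEP]"]
# with masked_lm_prob=0.15 and max_predictions_per_seq=20, the number of predictions is
# min(20, max(1, round(6 * 0.15))) = 1, chosen from the four non-special tokens; the
# selected token is replaced by "[MASK]" 80% of the time, kept unchanged 10% of the time,
# and replaced by a random vocabulary word 10% of the time.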
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary the BERT model will train on.")
parser.add_argument("--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file")
parser.add_argument("--output_file",
default=None,
type=str,
required=True,
help="The output file where the model checkpoints will be written.")
## Other parameters
# str
parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
#int
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks).")
parser.add_argument("--max_predictions_per_seq",
default=20,
type=int,
help="Maximum sequence length.")
# floats
parser.add_argument("--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument("--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length")
parser.add_argument("--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt') )]
else:
raise ValueError("{} is not a valid path".format(args.input_file))
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_file = args.output_file
write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_file)
if __name__ == "__main__":
main()
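# Example invocation (paths below are illustrative placeholders):
#   python create_pretraining_data.py \
#       --input_file=/workspace/data/corpus_dir \
#       --vocab_file=/workspace/vocab/vocab.txt \
#       --output_file=/workspace/data/pretraining_128.hdf5 \
#       --max_seq_length=128 --max_predictions_per_seq=20 \
#       --masked_lm_prob=0.15 --short_seq_prob=0.1 --dupe_factor=10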
|
PyTorch/Segmentation/nnUNet/notebooks | notebooks | custom_dataset | #!/usr/bin/env python
# coding: utf-8
# # nnUNet for custom dataset
# # Table of contents
# - [Introduction](#introduction)
# - [Model](#model)
# - [Model creation](#model-creation)
# - [Metric](#metric)
# - [Dataset](#dataset)
# - [Visualization](#visualization)
# - [Data loading](#dataloader)
# - [Running the model on a custom dataset](#custom)
# - [Training](#training)
# - [Inference](#inference)
# ## Introduction <a name="introduction"></a>
#
# In our repository, nnUNet is used for the [Medical Segmentation Decathlon](http://medicaldecathlon.com/) dataset. However, you can apply it to any image segmentation dataset. In this notebook you will learn which parts of the code need to be changed to customize nnUNet.
#
# For demonstration purposes we will use the satellite imagery from the [xBD](https://arxiv.org/abs/1911.09296) dataset, which was used in the [xView2](https://xview2.org) challenge. The goal of the contest was to build an accurate and efficient model for building localization and damage classification. The xBD dataset provides pre- and post-event satellite imagery across a variety of disaster events. The contest had two tasks corresponding to the image type:
# - pre - localize buildings with a binary (0/1) segmentation mask.
# - post - classify building damage into classes 1, 2, 3, 4.
#
# In this notebook we will focus on the building localization part, i.e. the pre-disaster images.
#
# To download the dataset you have to create an account at the [challenge website](https://xview2.org).
# ## Model <a name="model"></a>
#
# [nnUNet](https://arxiv.org/abs/1904.08128) is a robust and self-adapting framework for UNet-based medical image segmentation. It allows segmenting 2D and 3D images with high accuracy and efficiency.
#
# Based on dataset properties like image shapes and pixel spacings, it dynamically creates the UNet architecture by selecting the number of layers together with appropriate kernel sizes and strides. During data preprocessing we create a *config.pkl* file with the metadata necessary for creating the UNet architecture. If the data preprocessing part is skipped, the *config.pkl* file needs to be created manually. It contains a dictionary with the fields:
#
# - `patch_size` - shape of cropped image during training
# - `spacings` - pixel spacings
# - `n_class` - number of classes
# - `in_channels` - number of input channels
# In[1]:
import os
import pickle
PATH = "/data/11_2d"
pickle.dump(
{
"patch_size": [512, 512],
"spacings": [1, 1],
"n_class": 2,
"in_channels": 3,
},
open(os.path.join(PATH, "config.pkl"), "wb"),
)
# ### Model creation <a name="model-creation"></a>
#
# Normally, we pass model parameters as command line arguments to the `main.py` script. When running nnUNet in a Jupyter notebook, you can pass them as a string to the `get_main_args` function, which returns the *Namespace* necessary to initialize the model.
#
# In this example the parameters are `--task 11 --dim 2 --deep_supervision --data2d_dim 2 --tta --norm batch`, where:
# - `task` - number of the task to run (tasks 01-10 are reserved for MSD). The full path to the data location is inferred from it as /data/{task}_{dim}d
# - `dim` - dimensionality of the UNet
# - `data2d_dim` - dimensionality of the input data from the data loader (for MSD tasks we get 3D data from the data loaders even for the 2D UNet and transform its layout before feeding it to the network)
# - `deep_supervision` - enables deep supervision
# - `tta` - enables test time augmentation
# - `norm` - normalization layer (by default instance normalization is used)
#
# For the full list of command line options see [here](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/nnUNet#command-line-options).
# In[2]:
import sys; sys.path.append("../")
from models.nn_unet import NNUnet
from utils.utils import get_main_args
params = "--task 11 --dim 2 --deep_supervision --data2d_dim 2 --tta --norm batch"
args = get_main_args(params)
model = NNUnet(args)
# ### Metric <a name="metric"></a>
#
# It is possible to customize the metrics as well. In our nnUNet repo we use the mean Dice score as the final metric; however, in xView2 the global F1 score was used. Below we provide an implementation of the global F1 score and override the model's metric with it.
# In[3]:
from pytorch_lightning.metrics import Metric
import torch
import torch.nn as nn
class F1(Metric):
def __init__(self):
super().__init__(dist_sync_on_step=False)
self.add_state("tp", default=torch.zeros((1,)), dist_reduce_fx="sum")
self.add_state("fp", default=torch.zeros((1,)), dist_reduce_fx="sum")
self.add_state("fn", default=torch.zeros((1,)), dist_reduce_fx="sum")
def update(self, preds, targets):
preds = torch.argmax(preds, dim=1)
true_pos, false_neg, false_pos = self.get_stats(preds, targets, 1)
self.tp[0] += true_pos
self.fn[0] += false_neg
self.fp[0] += false_pos
def compute(self):
return 200 * self.tp / (2 * self.tp + self.fp + self.fn)
@staticmethod
def get_stats(pred, targ, class_idx):
true_pos = torch.logical_and(pred == class_idx, targ == class_idx).sum()
false_neg = torch.logical_and(pred != class_idx, targ == class_idx).sum()
false_pos = torch.logical_and(pred == class_idx, targ != class_idx).sum()
return true_pos, false_neg, false_pos
f1_score = F1()
model.dice = f1_score
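# As a quick sanity check of the formula above (the values are made up): with tp=80, fp=20 and fn=20,
# `compute` returns 200 * 80 / (2 * 80 + 20 + 20) = 80, i.e. the global F1 score expressed as a percentage.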
# ## Dataset <a name="dataset"></a>
#
# The xBD is the largest building damage assessment dataset to date, containing 850,736 building annotations across 45,362 km² of imagery. There are 9168 and 933 images in the training and validation sets respectively, where each image has shape (1024, 1024, 3).
#
#
# In this notebook we assume the dataset has the following structure:
#
# ```
# /data/11_2d
# │
# ├───train
# │ ├── images
# │ │ └── <image_id>.png
# │ │ └── ...
# │ └── targets
# │ └── <image_id>.png
# │ └── ...
# └────val
# ├── images
# │ └── <image_id>.png
# │ └── ...
# └── targets
# └── <image_id>.png
# └── ...
# ```
# ### Visualization <a name="visualization"></a>
#
# Let's start by visualizing some images and their corresponding labels.
# In[4]:
import cv2
import matplotlib.pyplot as plt
from glob import glob
# Loading pre images which correspond to localization task.
imgs = sorted(glob(os.path.join(PATH, "train", "images", f"*pre*")))
lbls = sorted(glob(os.path.join(PATH, "train", "targets", f"*pre*")))
for idx in [1385, 5560, 408, 6897]:
fig, ax = plt.subplots(nrows=1, ncols=2)
fig.set_figheight(15)
fig.set_figwidth(15)
c1, c2 = ax
img, lbl = cv2.imread(imgs[idx]), cv2.imread(lbls[idx], cv2.IMREAD_UNCHANGED)
for c, p in [(c1, img), (c2, lbl)]:
c.axes.xaxis.set_visible(False)
c.axes.yaxis.set_visible(False)
c.imshow(p)
fig.tight_layout()
plt.show()
# ### Data loading <a name="dataloader"></a>
#
# In our nnUNet repository we convert data to the npy format and use [NVIDIA DALI](https://docs.nvidia.com/deeplearning/dali/master-user-guide/docs/index.html) for data loading. However, you can modify this part and create your own data loading pipeline.
#
# In this example we use the PyTorch DataLoader with *zoom*, *crop*, *flips*, *gaussian noise*, *gamma*, *brightness* and *contrast* augmentations from the [albumentations](https://albumentations.ai) library.
#
# As our implementation of nnUNet is based on [PyTorch-Lightning](https://pytorch-lightning.readthedocs.io/en/stable/) we need to create a *LightningDataModule* to wrap the data loaders.
# In[5]:
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning import LightningDataModule
import albumentations as A
import numpy as np
class xBDTrainDataset(Dataset):
def __init__(self, path):
self.imgs = sorted(glob(os.path.join(path, "images", f"*pre*")))
self.lbls = sorted(glob(os.path.join(path, "targets", f"*pre*")))
assert len(self.imgs) == len(self.lbls)
self.zoom = A.RandomScale(p=0.2, scale_limit=(0, 0.3), interpolation=cv2.INTER_CUBIC)
self.crop = A.CropNonEmptyMaskIfExists(p=1, width=512, height=512)
self.hflip = A.HorizontalFlip(p=0.33)
self.vflip = A.VerticalFlip(p=0.33)
self.noise = A.GaussNoise(p=0.1)
self.brctr = A.RandomBrightnessContrast(p=0.2)
self.gamma = A.RandomGamma(p=0.2)
self.normalize = A.Normalize()
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img, lbl = self.load_pair(idx)
data = {"image": img, "mask": lbl}
data = self.zoom(image=data["image"], mask=data["mask"])
data = self.crop(image=data["image"], mask=data["mask"])
data = self.hflip(image=data["image"], mask=data["mask"])
data = self.vflip(image=data["image"], mask=data["mask"])
img, lbl = data["image"], data["mask"]
img = self.noise(image=img)["image"]
img = self.brctr(image=img)["image"]
img = self.gamma(image=img)["image"]
img = self.normalize(image=img)["image"]
lbl = np.expand_dims(lbl, 0)
return {"image": np.transpose(img, (2, 0, 1)), "label": lbl}
def load_pair(self, idx):
img = cv2.imread(self.imgs[idx])
lbl = cv2.imread(self.lbls[idx], cv2.IMREAD_UNCHANGED)
return img, lbl
class xBDValDataset(Dataset):
def __init__(self, path):
self.imgs = sorted(glob(os.path.join(path, "images", f"*pre*")))
self.lbls = sorted(glob(os.path.join(path, "targets", f"*pre*")))
assert len(self.imgs) == len(self.lbls)
self.normalize = A.Normalize()
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img, lbl = self.load_pair(idx)
img = self.normalize(image=img)["image"]
lbl = np.expand_dims(lbl, 0)
return {"image": np.transpose(img, (2, 0, 1)), "label": lbl}
def load_pair(self, idx):
img = cv2.imread(self.imgs[idx])
lbl = cv2.imread(self.lbls[idx], cv2.IMREAD_UNCHANGED)
return img, lbl
class DataModule(LightningDataModule):
def __init__(self, data_path, batch_size):
super().__init__()
self.data_path = data_path
self.train_dataset = xBDTrainDataset(os.path.join(self.data_path, "train"))
self.val_dataset = xBDValDataset(os.path.join(self.data_path, "val"))
self.loader_kwargs = {
"batch_size": batch_size,
"pin_memory": True,
"num_workers": 8,
}
def train_dataloader(self):
return DataLoader(self.train_dataset, drop_last=True, shuffle=True, **self.loader_kwargs)
def val_dataloader(self):
return DataLoader(self.val_dataset, **self.loader_kwargs)
data_module = DataModule("/data/11_2d", batch_size=32)
# ## Running the model on a custom dataset <a name="custom"></a>
#
# Now we are all set to start training nnUNet on the xBD dataset.
#
# ### Training <a name="training"></a>
#
# Thanks to PyTorch Lightning we can very easily train with AMP or on multiple GPUs - just pass *precision=16* and *gpus=NGPU* to the Lightning Trainer.
# In[6]:
from pytorch_lightning import Trainer
trainer = Trainer(
gpus=1,
precision=16,
benchmark=True,
max_epochs=350,
num_sanity_val_steps=0,
progress_bar_refresh_rate=0,
default_root_dir=args.results,
)
trainer.fit(model, data_module);
# ### Inference <a name="inference"></a>
#
# As a final step, let's run inference and visualize the predicted masks from the trained nnUNet.
# In[7]:
model = NNUnet.load_from_checkpoint("/results/checkpoints/last.ckpt", strict=False, map_location={"cuda:0": "cpu"})
normalize = A.Normalize()
idx = [1385, 6897]
im, lb = [], []
for i in idx:
img = np.transpose(normalize(image=cv2.imread(imgs[i]))["image"], (2, 0, 1))
im.append(torch.tensor(img))
lb.append(cv2.imread(lbls[i], cv2.IMREAD_UNCHANGED))
img = torch.tensor(np.stack(im))
model = model.eval()
out = model(img)
preds = np.argmax(out.detach().numpy(), 1)
for i in range(2):
fig, ax = plt.subplots(nrows=1, ncols=3)
fig.set_figheight(15)
fig.set_figwidth(15)
c1, c2, c3 = ax
img, pred, lbl = cv2.imread(imgs[idx[i]]), preds[i], lb[i]
for a, (c, p) in enumerate([(c1, img), (c2, lbl), (c3, pred)]):
c.axes.xaxis.set_visible(False)
c.axes.yaxis.set_visible(False)
if i == 0:
c.title.set_text(["image", "ground truth", "prediction"][a])
c.imshow(p)
fig.tight_layout()
plt.show()
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import PerfAnalyzerRunner # noqa: F401
from .warmup import PerfAnalyzerWarmupRunner # noqa: F401
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection | object_detection | region_similarity_calculator | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
class RegionSimilarityCalculator:
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
    This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
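# Minimal usage sketch (illustrative; assumes a BoxList-like object exposing a `get()`
# method that returns an [N, 4] float tensor of [y_min, x_min, y_max, x_max] boxes;
# the stub class below is not part of this module):
#
#   class _Boxes:
#       def __init__(self, boxes):
#           self._boxes = tf.constant(boxes, dtype=tf.float32)
#       def get(self):
#           return self._boxes
#
#   b1 = _Boxes([[0.0, 0.0, 1.0, 1.0]])
#   b2 = _Boxes([[0.0, 0.0, 0.5, 1.0], [2.0, 2.0, 3.0, 3.0]])
#   print(IouSimilarity().compare(b1, b2))  # [[0.5, 0.0]]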
|
PyTorch/Recommendation/DLRM/tests/transcoding | transcoding | small_csv | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
feature_spec:
cat_0.bin:
cardinality: 10
cat_1.bin:
cardinality: 23412
cat_2.bin:
cardinality: 45000
cat_3.bin:
cardinality: 100
cat_4.bin:
cardinality: 50
cat_5.bin:
cardinality: 127
label: {}
num_0: {}
num_1: {}
num_2: {}
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: csv
- features:
- label
files:
- test/label.bin
type: csv
- features:
- cat_0.bin
- cat_1.bin
files:
- test/catpart1.bin
type: csv
- features:
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
files:
- test/catpart2.bin
type: csv
train:
- features: *id001
files:
- train/numerical.bin
type: csv
- features:
- label
files:
- train/label.bin
type: csv
- features:
- cat_0.bin
- cat_1.bin
- cat_2.bin
files:
- train/catpart0.bin
type: csv
- features:
- cat_3.bin
- cat_4.bin
- cat_5.bin
files:
- train/catpart1.bin
type: csv
|
PyTorch/Recommendation/DLRM/dlrm/cuda_src/sparse_gather | sparse_gather | gather_gpu | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <math.h>
#include <cassert>
#include <iostream>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
// For simplicity, boundary checks are removed
// All the kernels MUST be launched with grid size = batch size and block size = embedding size
__global__ void GatherKernel(const float* params,
int64_t num_features,
int embed_size,
int batch_size,
int query_nnz,
const int64_t* indices,
float* ret) {
int tid = threadIdx.x, bid = blockIdx.x;
extern __shared__ int shmem_indices[];
// each CTA load one row of indices in the mini batch into shared memory
for (int i = tid; i < query_nnz; i += blockDim.x) {
shmem_indices[i] = indices[query_nnz * bid + i];
}
__syncthreads();
#pragma unroll
for (int i = 0; i < query_nnz; ++i) {
// printf("%d, %d, %d\n", bid, i, shmem_indices[i]);
ret[(bid * query_nnz + i) * embed_size + tid] =
params[(int64_t)shmem_indices[i] * embed_size + tid];
}
}
__global__ void OneHotKernel(const float* params,
int64_t num_features,
int embed_size,
int batch_size,
const int64_t* indices,
float* ret) {
int tid = threadIdx.x, bid = blockIdx.x;
ret[bid * embed_size + tid] = params[(int64_t)indices[bid] * embed_size + tid];
}
// grads is used to update params directly by atomic instead of forming wgrad
// Only SGD without momentum and without weight decay is supported
__global__ void GatherBackwardFuseSgdKernel(const float* grads,
int64_t num_features,
int embed_size,
int batch_size,
int query_nnz,
const int64_t* indices,
float lr,
float* params) {
int tid = threadIdx.x, bid = blockIdx.x;
extern __shared__ int shmem_indices[];
for (int i = tid; i < query_nnz; i += blockDim.x) {
shmem_indices[i] = indices[query_nnz * bid + i];
}
__syncthreads();
#pragma unroll
for (int i = 0; i < query_nnz; ++i) {
atomicAdd(¶ms[(int64_t)shmem_indices[i] * embed_size + tid],
-lr * grads[(bid * query_nnz + i) * embed_size + tid]);
}
}
// Keep the interface and argument name as torch.embedding()
// input is indices, and weight is embedding table
torch::Tensor gather_gpu_fwd(const torch::Tensor weight, const torch::Tensor indices) {
AT_ASSERT(indices.is_cuda());
AT_ASSERT(weight.is_cuda());
AT_ASSERT(indices.scalar_type() == torch::ScalarType::Long);
AT_ASSERT(weight.scalar_type() == torch::ScalarType::Float);
AT_ASSERT(weight.is_contiguous());
int batch_size = indices.size(0);
int query_nnz = 1;
if (indices.dim() > 1) {
query_nnz = indices.size(1);
}
// Shared memory size limit. Larger nnz can also be supported by skipping shared memory if necessary
  TORCH_CHECK(query_nnz <= 12288, "query_nnz must not exceed 12288 (48 KB shared memory limit)");
int num_features = weight.size(0);
int embed_size = weight.size(1);
  // Block dimension limit. Widths larger than 1024 could be supported by letting each block read
  // from different strides if necessary.
TORCH_CHECK(embed_size <= 1024, "Embedding width must be smaller than 1024");
auto outputs =
torch::empty(batch_size * query_nnz * embed_size, at::device(at::kCUDA).dtype(at::kFloat));
if (query_nnz != 1) {
GatherKernel<<<batch_size,
embed_size,
query_nnz * sizeof(int),
at::cuda::getCurrentCUDAStream()>>>(weight.data_ptr<float>(),
num_features,
embed_size,
batch_size,
query_nnz,
indices.contiguous().data_ptr<int64_t>(),
outputs.data_ptr<float>());
} else {
OneHotKernel<<<batch_size, embed_size, 0, at::cuda::getCurrentCUDAStream()>>>(
weight.data_ptr<float>(),
num_features,
embed_size,
batch_size,
indices.contiguous().data_ptr<int64_t>(),
outputs.data_ptr<float>());
}
return outputs.reshape({batch_size, query_nnz, embed_size});
}
// Because of the complexity of handling sparse tensors, using the native backward function is still faster.
// TODO(haow): Figure out a way to write out the sparse tensor directly to avoid an additional copy, which makes
// the customized implementation slower than PyTorch's own despite the kernels being more efficient.
torch::Tensor gather_gpu_bwd(const torch::Tensor grad,
const torch::Tensor indices,
const int num_features) {
return at::embedding_sparse_backward(grad, indices, num_features, /*padding_idx=*/-1, /*scale_grad_by_freq=*/false);
}
// Backward gather with fused plain SGD (no weight decay nor momentum)
void gather_gpu_bwd_fuse_sgd(const torch::Tensor grad,
const torch::Tensor indices,
float lr,
torch::Tensor weight) {
AT_ASSERT(grad.is_cuda());
AT_ASSERT(indices.is_cuda());
AT_ASSERT(weight.is_cuda());
AT_ASSERT(grad.scalar_type() == torch::ScalarType::Float);
AT_ASSERT(indices.scalar_type() == torch::ScalarType::Long);
AT_ASSERT(weight.scalar_type() == torch::ScalarType::Float);
AT_ASSERT(weight.is_contiguous());
int batch_size = indices.size(0);
int query_nnz = 1;
if (indices.dim() > 1) {
query_nnz = indices.size(1);
}
int num_features = weight.size(0);
int embed_size = weight.size(1);
GatherBackwardFuseSgdKernel<<<batch_size,
embed_size,
query_nnz * sizeof(int),
at::cuda::getCurrentCUDAStream()>>>(
grad.contiguous().data_ptr<float>(),
num_features,
embed_size,
batch_size,
query_nnz,
indices.contiguous().data_ptr<int64_t>(),
lr,
weight.data_ptr<float>());
}
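// Usage sketch from Python (illustrative; assumes these functions are exposed through a
// pybind11/torch extension module, here called `sparse_gather`, which is defined elsewhere):
//
//   out = sparse_gather.gather_gpu_fwd(weight, indices)              # [batch, nnz, embed]
//   grad_weight = sparse_gather.gather_gpu_bwd(grad, indices, num_features)
//   sparse_gather.gather_gpu_bwd_fuse_sgd(grad, indices, lr, weight)  # in-place SGD update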
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model_dataset | model_dataset | tft_electricity | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
model:
config:
n_head: 4
hidden_size: 128
dropout: 0.1
attn_dropout: 0
trainer:
config:
batch_size: 1024
num_epochs: 20
gradient_norm: 1.0
optimizer:
lr: .001
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules | quick_schedules | e2e_faster_rcnn_R_50_C4_quick | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"
RPN:
PRE_NMS_TOP_N_TEST: 6000
POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
BATCH_SIZE_PER_IMAGE: 256
DATASETS:
TRAIN: ("coco_2014_minival",)
TEST: ("coco_2014_minival",)
INPUT:
MIN_SIZE_TRAIN: 600
MAX_SIZE_TRAIN: 1000
MIN_SIZE_TEST: 800
MAX_SIZE_TEST: 1000
SOLVER:
BASE_LR: 0.005
WEIGHT_DECAY: 0.0001
STEPS: (1500,)
MAX_ITER: 2000
IMS_PER_BATCH: 2
TEST:
IMS_PER_BATCH: 2
|
PyTorch/LanguageModeling/BERT | BERT | tokenization | # coding=utf-8
# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
import six
from io import open
from file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
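# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the toy vocabulary below is
# hypothetical and simply demonstrates the greedy longest-match-first behavior
# of BasicTokenizer + WordpieceTokenizer documented above.
if __name__ == "__main__":
    toy_vocab = collections.OrderedDict(
        (tok, i) for i, tok in enumerate(
            ["[UNK]", "[CLS]", "[SEP]", "un", "##aff", "##able", "runners", "!"]))
    basic = BasicTokenizer(do_lower_case=True)
    wordpiece = WordpieceTokenizer(vocab=toy_vocab)
    pieces = []
    for word in basic.tokenize("Unaffable runners!"):
        pieces.extend(wordpiece.tokenize(word))
    print(pieces)  # ['un', '##aff', '##able', 'runners', '!']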
|
PyTorch/SpeechSynthesis/Tacotron2/tensorrt | tensorrt | test_infer_trt | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
sys.path.append('./')
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
from inference_trt import infer_tacotron2_trt, infer_waveglow_trt
from trt_utils import load_engine
from waveglow.denoiser import Denoiser  # used when --waveglow-ckpt is provided (module path assumed from this repository)
import tensorrt as trt
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--encoder', type=str, required=True,
help='full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
print(np.mean(measurements_all['latency'][1:]),
np.mean(measurements_all['throughput'][1:]),
np.mean(measurements_all['pre_processing'][1:]),
np.mean(measurements_all['type_conversion'][1:])+
np.mean(measurements_all['storage'][1:])+
np.mean(measurements_all['data_transfer'][1:]),
np.mean(measurements_all['num_mels_per_audio'][1:]))
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
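    # `latency` is sorted ascending above, so taking the max of the first
    # fraction of samples yields the approximate 50/90/95/99/100th
    # percentile latencies reported below.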
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.4f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {}".format(np.mean(num_mels_per_audio))) #
print("Latency average (seconds) = {:.4f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.4f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.4f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.4f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.4f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.4f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.4f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_encoder_time": [],
"tacotron2_decoder_time": [],
"tacotron2_postnet_time": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
waveglow = load_engine(args.waveglow, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser,
args.waveglow_ckpt,
fp16_run=args.fp16,
cpu_run=False,
forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = decoder_iter.create_execution_context()
postnet_context = postnet.create_execution_context()
waveglow_context = waveglow.create_execution_context()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing"):
sequences_padded, input_lengths = prepare_input_sequence(texts)
sequences_padded = sequences_padded.to(torch.int32)
input_lengths = input_lengths.to(torch.int32)
with torch.no_grad():
with MeasureTime(measurements, "latency"):
with MeasureTime(measurements, "tacotron2_latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences_padded, input_lengths, measurements, args.fp16)
with MeasureTime(measurements, "waveglow_latency"):
audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion"):
audios = audios.float()
with MeasureTime(measurements, "data_transfer"):
audios = audios.cpu()
with MeasureTime(measurements, "storage"):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
if k in measurements_all.keys():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
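# Example invocation (engine paths are placeholders; the flags correspond to
# parse_args above):
#   python test_infer_trt.py \
#       --encoder encoder.engine --decoder decoder_iter.engine \
#       --postnet postnet.engine --waveglow waveglow.engine \
#       -bs 1 -il 64 --fp16 --num-iters 10 --log-file nvlog.json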
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/bert | bert | vocab | [PAD]
[unused0]
[unused1]
[unused2]
[unused3]
[unused4]
[unused5]
[unused6]
[unused7]
[unused8]
[unused9]
[unused10]
[unused11]
[unused12]
[unused13]
[unused14]
[unused15]
[unused16]
[unused17]
[unused18]
[unused19]
[unused20]
[unused21]
[unused22]
[unused23]
[unused24]
[unused25]
[unused26]
[unused27]
[unused28]
[unused29]
[unused30]
[unused31]
[unused32]
[unused33]
[unused34]
[unused35]
[unused36]
[unused37]
[unused38]
[unused39]
[unused40]
[unused41]
[unused42]
[unused43]
[unused44]
[unused45]
[unused46]
[unused47]
[unused48]
[unused49]
[unused50]
[unused51]
[unused52]
[unused53]
[unused54]
[unused55]
[unused56]
[unused57]
[unused58]
[unused59]
[unused60]
[unused61]
[unused62]
[unused63]
[unused64]
[unused65]
[unused66]
[unused67]
[unused68]
[unused69]
[unused70]
[unused71]
[unused72]
[unused73]
[unused74]
[unused75]
[unused76]
[unused77]
[unused78]
[unused79]
[unused80]
[unused81]
[unused82]
[unused83]
[unused84]
[unused85]
[unused86]
[unused87]
[unused88]
[unused89]
[unused90]
[unused91]
[unused92]
[unused93]
[unused94]
[unused95]
[unused96]
[unused97]
[unused98]
[UNK]
[CLS]
[SEP]
[MASK]
[unused99]
[unused100]
[unused101]
[unused102]
[unused103]
[unused104]
[unused105]
[unused106]
[unused107]
[unused108]
[unused109]
[unused110]
[unused111]
[unused112]
[unused113]
[unused114]
[unused115]
[unused116]
[unused117]
[unused118]
[unused119]
[unused120]
[unused121]
[unused122]
[unused123]
[unused124]
[unused125]
[unused126]
[unused127]
[unused128]
[unused129]
[unused130]
[unused131]
[unused132]
[unused133]
[unused134]
[unused135]
[unused136]
[unused137]
[unused138]
[unused139]
[unused140]
[unused141]
[unused142]
[unused143]
[unused144]
[unused145]
[unused146]
[unused147]
[unused148]
[unused149]
[unused150]
[unused151]
[unused152]
[unused153]
[unused154]
[unused155]
[unused156]
[unused157]
[unused158]
[unused159]
[unused160]
[unused161]
[unused162]
[unused163]
[unused164]
[unused165]
[unused166]
[unused167]
[unused168]
[unused169]
[unused170]
[unused171]
[unused172]
[unused173]
[unused174]
[unused175]
[unused176]
[unused177]
[unused178]
[unused179]
[unused180]
[unused181]
[unused182]
[unused183]
[unused184]
[unused185]
[unused186]
[unused187]
[unused188]
[unused189]
[unused190]
[unused191]
[unused192]
[unused193]
[unused194]
[unused195]
[unused196]
[unused197]
[unused198]
[unused199]
[unused200]
[unused201]
[unused202]
[unused203]
[unused204]
[unused205]
[unused206]
[unused207]
[unused208]
[unused209]
[unused210]
[unused211]
[unused212]
[unused213]
[unused214]
[unused215]
[unused216]
[unused217]
[unused218]
[unused219]
[unused220]
[unused221]
[unused222]
[unused223]
[unused224]
[unused225]
[unused226]
[unused227]
[unused228]
[unused229]
[unused230]
[unused231]
[unused232]
[unused233]
[unused234]
[unused235]
[unused236]
[unused237]
[unused238]
[unused239]
[unused240]
[unused241]
[unused242]
[unused243]
[unused244]
[unused245]
[unused246]
[unused247]
[unused248]
[unused249]
[unused250]
[unused251]
[unused252]
[unused253]
[unused254]
[unused255]
[unused256]
[unused257]
[unused258]
[unused259]
[unused260]
[unused261]
[unused262]
[unused263]
[unused264]
[unused265]
[unused266]
[unused267]
[unused268]
[unused269]
[unused270]
[unused271]
[unused272]
[unused273]
[unused274]
[unused275]
[unused276]
[unused277]
[unused278]
[unused279]
[unused280]
[unused281]
[unused282]
[unused283]
[unused284]
[unused285]
[unused286]
[unused287]
[unused288]
[unused289]
[unused290]
[unused291]
[unused292]
[unused293]
[unused294]
[unused295]
[unused296]
[unused297]
[unused298]
[unused299]
[unused300]
[unused301]
[unused302]
[unused303]
[unused304]
[unused305]
[unused306]
[unused307]
[unused308]
[unused309]
[unused310]
[unused311]
[unused312]
[unused313]
[unused314]
[unused315]
[unused316]
[unused317]
[unused318]
[unused319]
[unused320]
[unused321]
[unused322]
[unused323]
[unused324]
[unused325]
[unused326]
[unused327]
[unused328]
[unused329]
[unused330]
[unused331]
[unused332]
[unused333]
[unused334]
[unused335]
[unused336]
[unused337]
[unused338]
[unused339]
[unused340]
[unused341]
[unused342]
[unused343]
[unused344]
[unused345]
[unused346]
[unused347]
[unused348]
[unused349]
[unused350]
[unused351]
[unused352]
[unused353]
[unused354]
[unused355]
[unused356]
[unused357]
[unused358]
[unused359]
[unused360]
[unused361]
[unused362]
[unused363]
[unused364]
[unused365]
[unused366]
[unused367]
[unused368]
[unused369]
[unused370]
[unused371]
[unused372]
[unused373]
[unused374]
[unused375]
[unused376]
[unused377]
[unused378]
[unused379]
[unused380]
[unused381]
[unused382]
[unused383]
[unused384]
[unused385]
[unused386]
[unused387]
[unused388]
[unused389]
[unused390]
[unused391]
[unused392]
[unused393]
[unused394]
[unused395]
[unused396]
[unused397]
[unused398]
[unused399]
[unused400]
[unused401]
[unused402]
[unused403]
[unused404]
[unused405]
[unused406]
[unused407]
[unused408]
[unused409]
[unused410]
[unused411]
[unused412]
[unused413]
[unused414]
[unused415]
[unused416]
[unused417]
[unused418]
[unused419]
[unused420]
[unused421]
[unused422]
[unused423]
[unused424]
[unused425]
[unused426]
[unused427]
[unused428]
[unused429]
[unused430]
[unused431]
[unused432]
[unused433]
[unused434]
[unused435]
[unused436]
[unused437]
[unused438]
[unused439]
[unused440]
[unused441]
[unused442]
[unused443]
[unused444]
[unused445]
[unused446]
[unused447]
[unused448]
[unused449]
[unused450]
[unused451]
[unused452]
[unused453]
[unused454]
[unused455]
[unused456]
[unused457]
[unused458]
[unused459]
[unused460]
[unused461]
[unused462]
[unused463]
[unused464]
[unused465]
[unused466]
[unused467]
[unused468]
[unused469]
[unused470]
[unused471]
[unused472]
[unused473]
[unused474]
[unused475]
[unused476]
[unused477]
[unused478]
[unused479]
[unused480]
[unused481]
[unused482]
[unused483]
[unused484]
[unused485]
[unused486]
[unused487]
[unused488]
[unused489]
[unused490]
[unused491]
[unused492]
[unused493]
[unused494]
[unused495]
[unused496]
[unused497]
[unused498]
[unused499]
[unused500]
[unused501]
[unused502]
[unused503]
[unused504]
[unused505]
[unused506]
[unused507]
[unused508]
[unused509]
[unused510]
[unused511]
[unused512]
[unused513]
[unused514]
[unused515]
[unused516]
[unused517]
[unused518]
[unused519]
[unused520]
[unused521]
[unused522]
[unused523]
[unused524]
[unused525]
[unused526]
[unused527]
[unused528]
[unused529]
[unused530]
[unused531]
[unused532]
[unused533]
[unused534]
[unused535]
[unused536]
[unused537]
[unused538]
[unused539]
[unused540]
[unused541]
[unused542]
[unused543]
[unused544]
[unused545]
[unused546]
[unused547]
[unused548]
[unused549]
[unused550]
[unused551]
[unused552]
[unused553]
[unused554]
[unused555]
[unused556]
[unused557]
[unused558]
[unused559]
[unused560]
[unused561]
[unused562]
[unused563]
[unused564]
[unused565]
[unused566]
[unused567]
[unused568]
[unused569]
[unused570]
[unused571]
[unused572]
[unused573]
[unused574]
[unused575]
[unused576]
[unused577]
[unused578]
[unused579]
[unused580]
[unused581]
[unused582]
[unused583]
[unused584]
[unused585]
[unused586]
[unused587]
[unused588]
[unused589]
[unused590]
[unused591]
[unused592]
[unused593]
[unused594]
[unused595]
[unused596]
[unused597]
[unused598]
[unused599]
[unused600]
[unused601]
[unused602]
[unused603]
[unused604]
[unused605]
[unused606]
[unused607]
[unused608]
[unused609]
[unused610]
[unused611]
[unused612]
[unused613]
[unused614]
[unused615]
[unused616]
[unused617]
[unused618]
[unused619]
[unused620]
[unused621]
[unused622]
[unused623]
[unused624]
[unused625]
[unused626]
[unused627]
[unused628]
[unused629]
[unused630]
[unused631]
[unused632]
[unused633]
[unused634]
[unused635]
[unused636]
[unused637]
[unused638]
[unused639]
[unused640]
[unused641]
[unused642]
[unused643]
[unused644]
[unused645]
[unused646]
[unused647]
[unused648]
[unused649]
[unused650]
[unused651]
[unused652]
[unused653]
[unused654]
[unused655]
[unused656]
[unused657]
[unused658]
[unused659]
[unused660]
[unused661]
[unused662]
[unused663]
[unused664]
[unused665]
[unused666]
[unused667]
[unused668]
[unused669]
[unused670]
[unused671]
[unused672]
[unused673]
[unused674]
[unused675]
[unused676]
[unused677]
[unused678]
[unused679]
[unused680]
[unused681]
[unused682]
[unused683]
[unused684]
[unused685]
[unused686]
[unused687]
[unused688]
[unused689]
[unused690]
[unused691]
[unused692]
[unused693]
[unused694]
[unused695]
[unused696]
[unused697]
[unused698]
[unused699]
[unused700]
[unused701]
[unused702]
[unused703]
[unused704]
[unused705]
[unused706]
[unused707]
[unused708]
[unused709]
[unused710]
[unused711]
[unused712]
[unused713]
[unused714]
[unused715]
[unused716]
[unused717]
[unused718]
[unused719]
[unused720]
[unused721]
[unused722]
[unused723]
[unused724]
[unused725]
[unused726]
[unused727]
[unused728]
[unused729]
[unused730]
[unused731]
[unused732]
[unused733]
[unused734]
[unused735]
[unused736]
[unused737]
[unused738]
[unused739]
[unused740]
[unused741]
[unused742]
[unused743]
[unused744]
[unused745]
[unused746]
[unused747]
[unused748]
[unused749]
[unused750]
[unused751]
[unused752]
[unused753]
[unused754]
[unused755]
[unused756]
[unused757]
[unused758]
[unused759]
[unused760]
[unused761]
[unused762]
[unused763]
[unused764]
[unused765]
[unused766]
[unused767]
[unused768]
[unused769]
[unused770]
[unused771]
[unused772]
[unused773]
[unused774]
[unused775]
[unused776]
[unused777]
[unused778]
[unused779]
[unused780]
[unused781]
[unused782]
[unused783]
[unused784]
[unused785]
[unused786]
[unused787]
[unused788]
[unused789]
[unused790]
[unused791]
[unused792]
[unused793]
[unused794]
[unused795]
[unused796]
[unused797]
[unused798]
[unused799]
[unused800]
[unused801]
[unused802]
[unused803]
[unused804]
[unused805]
[unused806]
[unused807]
[unused808]
[unused809]
[unused810]
[unused811]
[unused812]
[unused813]
[unused814]
[unused815]
[unused816]
[unused817]
[unused818]
[unused819]
[unused820]
[unused821]
[unused822]
[unused823]
[unused824]
[unused825]
[unused826]
[unused827]
[unused828]
[unused829]
[unused830]
[unused831]
[unused832]
[unused833]
[unused834]
[unused835]
[unused836]
[unused837]
[unused838]
[unused839]
[unused840]
[unused841]
[unused842]
[unused843]
[unused844]
[unused845]
[unused846]
[unused847]
[unused848]
[unused849]
[unused850]
[unused851]
[unused852]
[unused853]
[unused854]
[unused855]
[unused856]
[unused857]
[unused858]
[unused859]
[unused860]
[unused861]
[unused862]
[unused863]
[unused864]
[unused865]
[unused866]
[unused867]
[unused868]
[unused869]
[unused870]
[unused871]
[unused872]
[unused873]
[unused874]
[unused875]
[unused876]
[unused877]
[unused878]
[unused879]
[unused880]
[unused881]
[unused882]
[unused883]
[unused884]
[unused885]
[unused886]
[unused887]
[unused888]
[unused889]
[unused890]
[unused891]
[unused892]
[unused893]
[unused894]
[unused895]
[unused896]
[unused897]
[unused898]
[unused899]
[unused900]
[unused901]
[unused902]
[unused903]
[unused904]
[unused905]
[unused906]
[unused907]
[unused908]
[unused909]
[unused910]
[unused911]
[unused912]
[unused913]
[unused914]
[unused915]
[unused916]
[unused917]
[unused918]
[unused919]
[unused920]
[unused921]
[unused922]
[unused923]
[unused924]
[unused925]
[unused926]
[unused927]
[unused928]
[unused929]
[unused930]
[unused931]
[unused932]
[unused933]
[unused934]
[unused935]
[unused936]
[unused937]
[unused938]
[unused939]
[unused940]
[unused941]
[unused942]
[unused943]
[unused944]
[unused945]
[unused946]
[unused947]
[unused948]
[unused949]
[unused950]
[unused951]
[unused952]
[unused953]
[unused954]
[unused955]
[unused956]
[unused957]
[unused958]
[unused959]
[unused960]
[unused961]
[unused962]
[unused963]
[unused964]
[unused965]
[unused966]
[unused967]
[unused968]
[unused969]
[unused970]
[unused971]
[unused972]
[unused973]
[unused974]
[unused975]
[unused976]
[unused977]
[unused978]
[unused979]
[unused980]
[unused981]
[unused982]
[unused983]
[unused984]
[unused985]
[unused986]
[unused987]
[unused988]
[unused989]
[unused990]
[unused991]
[unused992]
[unused993]
!
"
#
$
%
&
'
(
)
*
+
,
-
.
/
0
1
2
3
4
5
6
7
8
9
:
;
<
=
>
?
@
[
\
]
^
_
`
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
{
|
}
~
¡
¢
£
¤
¥
¦
§
¨
©
ª
«
¬
®
°
±
²
³
´
µ
¶
·
¹
º
»
¼
½
¾
¿
×
ß
æ
ð
÷
ø
þ
đ
ħ
ı
ł
ŋ
œ
ƒ
ɐ
ɑ
ɒ
ɔ
ɕ
ə
ɛ
ɡ
ɣ
ɨ
ɪ
ɫ
ɬ
ɯ
ɲ
ɴ
ɹ
ɾ
ʀ
ʁ
ʂ
ʃ
ʉ
ʊ
ʋ
ʌ
ʎ
ʐ
ʑ
ʒ
ʔ
ʰ
ʲ
ʳ
ʷ
ʸ
ʻ
ʼ
ʾ
ʿ
ˈ
ː
ˡ
ˢ
ˣ
ˤ
α
β
γ
δ
ε
ζ
η
θ
ι
κ
λ
μ
ν
ξ
ο
π
ρ
ς
σ
τ
υ
φ
χ
ψ
ω
а
б
в
г
д
е
ж
з
и
к
л
м
н
о
п
р
с
т
у
ф
х
ц
ч
ш
щ
ъ
ы
ь
э
ю
я
ђ
є
і
ј
љ
њ
ћ
ӏ
ա
բ
գ
դ
ե
թ
ի
լ
կ
հ
մ
յ
ն
ո
պ
ս
վ
տ
ր
ւ
ք
־
א
ב
ג
ד
ה
ו
ז
ח
ט
י
ך
כ
ל
ם
מ
ן
נ
ס
ע
ף
פ
ץ
צ
ק
ר
ש
ת
،
ء
ا
ب
ة
ت
ث
ج
ح
خ
د
ذ
ر
ز
س
ش
ص
ض
ط
ظ
ع
غ
ـ
ف
ق
ك
ل
م
ن
ه
و
ى
ي
ٹ
پ
چ
ک
گ
ں
ھ
ہ
ی
ے
अ
आ
उ
ए
क
ख
ग
च
ज
ट
ड
ण
त
थ
द
ध
न
प
ब
भ
म
य
र
ल
व
श
ष
स
ह
ा
ि
ी
ो
।
॥
ং
অ
আ
ই
উ
এ
ও
ক
খ
গ
চ
ছ
জ
ট
ড
ণ
ত
থ
দ
ধ
ন
প
ব
ভ
ম
য
র
ল
শ
ষ
স
হ
া
ি
ী
ে
க
ச
ட
த
ந
ன
ப
ம
ய
ர
ல
ள
வ
ா
ி
ு
ே
ை
ನ
ರ
ಾ
ක
ය
ර
ල
ව
ා
ก
ง
ต
ท
น
พ
ม
ย
ร
ล
ว
ส
อ
า
เ
་
།
ག
ང
ད
ན
པ
བ
མ
འ
ར
ལ
ས
မ
ა
ბ
გ
დ
ე
ვ
თ
ი
კ
ლ
მ
ნ
ო
რ
ს
ტ
უ
ᄀ
ᄂ
ᄃ
ᄅ
ᄆ
ᄇ
ᄉ
ᄊ
ᄋ
ᄌ
ᄎ
ᄏ
ᄐ
ᄑ
ᄒ
ᅡ
ᅢ
ᅥ
ᅦ
ᅧ
ᅩ
ᅪ
ᅭ
ᅮ
ᅯ
ᅲ
ᅳ
ᅴ
ᅵ
ᆨ
ᆫ
ᆯ
ᆷ
ᆸ
ᆼ
ᴬ
ᴮ
ᴰ
ᴵ
ᴺ
ᵀ
ᵃ
ᵇ
ᵈ
ᵉ
ᵍ
ᵏ
ᵐ
ᵒ
ᵖ
ᵗ
ᵘ
ᵢ
ᵣ
ᵤ
ᵥ
ᶜ
ᶠ
‐
‑
‒
–
—
―
‖
‘
’
‚
“
”
„
†
‡
•
…
‰
′
″
›
‿
⁄
⁰
ⁱ
⁴
⁵
⁶
⁷
⁸
⁹
⁺
⁻
ⁿ
₀
₁
₂
₃
₄
₅
₆
₇
₈
₉
₊
₍
₎
ₐ
ₑ
ₒ
ₓ
ₕ
ₖ
ₗ
ₘ
ₙ
ₚ
ₛ
ₜ
₤
₩
€
₱
₹
ℓ
№
ℝ
™
⅓
⅔
←
↑
→
↓
↔
↦
⇄
⇌
⇒
∂
∅
∆
∇
∈
−
∗
∘
√
∞
∧
∨
∩
∪
≈
≡
≤
≥
⊂
⊆
⊕
⊗
⋅
─
│
■
▪
●
★
☆
☉
♠
♣
♥
♦
♭
♯
⟨
⟩
ⱼ
⺩
⺼
⽥
、
。
〈
〉
《
》
「
」
『
』
〜
あ
い
う
え
お
か
き
く
け
こ
さ
し
す
せ
そ
た
ち
っ
つ
て
と
な
に
ぬ
ね
の
は
ひ
ふ
へ
ほ
ま
み
む
め
も
や
ゆ
よ
ら
り
る
れ
ろ
を
ん
ァ
ア
ィ
イ
ウ
ェ
エ
オ
カ
キ
ク
ケ
コ
サ
シ
ス
セ
タ
チ
ッ
ツ
テ
ト
ナ
ニ
ノ
ハ
ヒ
フ
ヘ
ホ
マ
ミ
ム
メ
モ
ャ
ュ
ョ
ラ
リ
ル
レ
ロ
ワ
ン
・
ー
一
三
上
下
不
世
中
主
久
之
也
事
二
五
井
京
人
亻
仁
介
代
仮
伊
会
佐
侍
保
信
健
元
光
八
公
内
出
分
前
劉
力
加
勝
北
区
十
千
南
博
原
口
古
史
司
合
吉
同
名
和
囗
四
国
國
土
地
坂
城
堂
場
士
夏
外
大
天
太
夫
奈
女
子
学
宀
宇
安
宗
定
宣
宮
家
宿
寺
將
小
尚
山
岡
島
崎
川
州
巿
帝
平
年
幸
广
弘
張
彳
後
御
德
心
忄
志
忠
愛
成
我
戦
戸
手
扌
政
文
新
方
日
明
星
春
昭
智
曲
書
月
有
朝
木
本
李
村
東
松
林
森
楊
樹
橋
歌
止
正
武
比
氏
民
水
氵
氷
永
江
沢
河
治
法
海
清
漢
瀬
火
版
犬
王
生
田
男
疒
発
白
的
皇
目
相
省
真
石
示
社
神
福
禾
秀
秋
空
立
章
竹
糹
美
義
耳
良
艹
花
英
華
葉
藤
行
街
西
見
訁
語
谷
貝
貴
車
軍
辶
道
郎
郡
部
都
里
野
金
鈴
镇
長
門
間
阝
阿
陳
陽
雄
青
面
風
食
香
馬
高
龍
龸
fi
fl
!
(
)
,
-
.
/
:
?
~
the
of
and
in
to
was
he
is
as
for
on
with
that
it
his
by
at
from
her
##s
she
you
had
an
were
but
be
this
are
not
my
they
one
which
or
have
him
me
first
all
also
their
has
up
who
out
been
when
after
there
into
new
two
its
##a
time
would
no
what
about
said
we
over
then
other
so
more
##e
can
if
like
back
them
only
some
could
##i
where
just
##ing
during
before
##n
do
##o
made
school
through
than
now
years
most
world
may
between
down
well
three
##d
year
while
will
##ed
##r
##y
later
##t
city
under
around
did
such
being
used
state
people
part
know
against
your
many
second
university
both
national
##er
these
don
known
off
way
until
re
how
even
get
head
...
didn
##ly
team
american
because
de
##l
born
united
film
since
still
long
work
south
us
became
any
high
again
day
family
see
right
man
eyes
house
season
war
states
including
took
life
north
same
each
called
name
much
place
however
go
four
group
another
found
won
area
here
going
10
away
series
left
home
music
best
make
hand
number
company
several
never
last
john
000
very
album
take
end
good
too
following
released
game
played
little
began
district
##m
old
want
those
side
held
own
early
county
ll
league
use
west
##u
face
think
##es
2010
government
##h
march
came
small
general
town
june
##on
line
based
something
##k
september
thought
looked
along
international
2011
air
july
club
went
january
october
our
august
april
york
12
few
2012
2008
east
show
member
college
2009
father
public
##us
come
men
five
set
station
church
##c
next
former
november
room
party
located
december
2013
age
got
2007
##g
system
let
love
2006
though
every
2014
look
song
water
century
without
body
black
night
within
great
women
single
ve
building
large
population
river
named
band
white
started
##an
once
15
20
should
18
2015
service
top
built
british
open
death
king
moved
local
times
children
february
book
why
11
door
need
president
order
final
road
wasn
although
due
major
died
village
third
knew
2016
asked
turned
st
wanted
say
##p
together
received
main
son
served
different
##en
behind
himself
felt
members
power
football
law
voice
play
##in
near
park
history
30
having
2005
16
##man
saw
mother
##al
army
point
front
help
english
street
art
late
hands
games
award
##ia
young
14
put
published
country
division
across
told
13
often
ever
french
london
center
six
red
2017
led
days
include
light
25
find
tell
among
species
really
according
central
half
2004
form
original
gave
office
making
enough
lost
full
opened
must
included
live
given
german
player
run
business
woman
community
cup
might
million
land
2000
court
development
17
short
round
ii
km
seen
class
story
always
become
sure
research
almost
director
council
la
##2
career
things
using
island
##z
couldn
car
##is
24
close
force
##1
better
free
support
control
field
students
2003
education
married
##b
nothing
worked
others
record
big
inside
level
anything
continued
give
james
##3
military
established
non
returned
feel
does
title
written
thing
feet
william
far
co
association
hard
already
2002
##ra
championship
human
western
100
##na
department
hall
role
various
production
21
19
heart
2001
living
fire
version
##ers
##f
television
royal
##4
produced
working
act
case
society
region
present
radio
period
looking
least
total
keep
england
wife
program
per
brother
mind
special
22
##le
am
works
soon
##6
political
george
services
taken
created
##7
further
able
reached
david
union
joined
upon
done
important
social
information
either
##ic
##x
appeared
position
ground
lead
rock
dark
election
23
board
france
hair
course
arms
site
police
girl
instead
real
sound
##v
words
moment
##te
someone
##8
summer
project
announced
san
less
wrote
past
followed
##5
blue
founded
al
finally
india
taking
records
america
##ne
1999
design
considered
northern
god
stop
battle
toward
european
outside
described
track
today
playing
language
28
call
26
heard
professional
low
australia
miles
california
win
yet
green
##ie
trying
blood
##ton
southern
science
maybe
everything
match
square
27
mouth
video
race
recorded
leave
above
##9
daughter
points
space
1998
museum
change
middle
common
##0
move
tv
post
##ta
lake
seven
tried
elected
closed
ten
paul
minister
##th
months
start
chief
return
canada
person
sea
release
similar
modern
brought
rest
hit
formed
mr
##la
1997
floor
event
doing
thomas
1996
robert
care
killed
training
star
week
needed
turn
finished
railway
rather
news
health
sent
example
ran
term
michael
coming
currently
yes
forces
despite
gold
areas
50
stage
fact
29
dead
says
popular
2018
originally
germany
probably
developed
result
pulled
friend
stood
money
running
mi
signed
word
songs
child
eventually
met
tour
average
teams
minutes
festival
current
deep
kind
1995
decided
usually
eastern
seemed
##ness
episode
bed
added
table
indian
private
charles
route
available
idea
throughout
centre
addition
appointed
style
1994
books
eight
construction
press
mean
wall
friends
remained
schools
study
##ch
##um
institute
oh
chinese
sometimes
events
possible
1992
australian
type
brown
forward
talk
process
food
debut
seat
performance
committee
features
character
arts
herself
else
lot
strong
russian
range
hours
peter
arm
##da
morning
dr
sold
##ry
quickly
directed
1993
guitar
china
##w
31
list
##ma
performed
media
uk
players
smile
##rs
myself
40
placed
coach
province
towards
wouldn
leading
whole
boy
official
designed
grand
census
##el
europe
attack
japanese
henry
1991
##re
##os
cross
getting
alone
action
lower
network
wide
washington
japan
1990
hospital
believe
changed
sister
##ar
hold
gone
sir
hadn
ship
##ka
studies
academy
shot
rights
below
base
bad
involved
kept
largest
##ist
bank
future
especially
beginning
mark
movement
section
female
magazine
plan
professor
lord
longer
##ian
sat
walked
hill
actually
civil
energy
model
families
size
thus
aircraft
completed
includes
data
captain
##or
fight
vocals
featured
richard
bridge
fourth
1989
officer
stone
hear
##ism
means
medical
groups
management
self
lips
competition
entire
lived
technology
leaving
federal
tournament
bit
passed
hot
independent
awards
kingdom
mary
spent
fine
doesn
reported
##ling
jack
fall
raised
itself
stay
true
studio
1988
sports
replaced
paris
systems
saint
leader
theatre
whose
market
capital
parents
spanish
canadian
earth
##ity
cut
degree
writing
bay
christian
awarded
natural
higher
bill
##as
coast
provided
previous
senior
ft
valley
organization
stopped
onto
countries
parts
conference
queen
security
interest
saying
allowed
master
earlier
phone
matter
smith
winning
try
happened
moving
campaign
los
##ley
breath
nearly
mid
1987
certain
girls
date
italian
african
standing
fell
artist
##ted
shows
deal
mine
industry
1986
##ng
everyone
republic
provide
collection
library
student
##ville
primary
owned
older
via
heavy
1st
makes
##able
attention
anyone
africa
##ri
stated
length
ended
fingers
command
staff
skin
foreign
opening
governor
okay
medal
kill
sun
cover
job
1985
introduced
chest
hell
feeling
##ies
success
meet
reason
standard
meeting
novel
1984
trade
source
buildings
##land
rose
guy
goal
##ur
chapter
native
husband
previously
unit
limited
entered
weeks
producer
operations
mountain
takes
covered
forced
related
roman
complete
successful
key
texas
cold
##ya
channel
1980
traditional
films
dance
clear
approximately
500
nine
van
prince
question
active
tracks
ireland
regional
silver
author
personal
sense
operation
##ine
economic
1983
holding
twenty
isbn
additional
speed
hour
edition
regular
historic
places
whom
shook
movie
km²
secretary
prior
report
chicago
read
foundation
view
engine
scored
1982
units
ask
airport
property
ready
immediately
lady
month
listed
contract
##de
manager
themselves
lines
##ki
navy
writer
meant
##ts
runs
##ro
practice
championships
singer
glass
commission
required
forest
starting
culture
generally
giving
access
attended
test
couple
stand
catholic
martin
caught
executive
##less
eye
##ey
thinking
chair
quite
shoulder
1979
hope
decision
plays
defeated
municipality
whether
structure
offered
slowly
pain
ice
direction
##ion
paper
mission
1981
mostly
200
noted
individual
managed
nature
lives
plant
##ha
helped
except
studied
computer
figure
relationship
issue
significant
loss
die
smiled
gun
ago
highest
1972
##am
male
bring
goals
mexico
problem
distance
commercial
completely
location
annual
famous
drive
1976
neck
1978
surface
caused
italy
understand
greek
highway
wrong
hotel
comes
appearance
joseph
double
issues
musical
companies
castle
income
review
assembly
bass
initially
parliament
artists
experience
1974
particular
walk
foot
engineering
talking
window
dropped
##ter
miss
baby
boys
break
1975
stars
edge
remember
policy
carried
train
stadium
bar
sex
angeles
evidence
##ge
becoming
assistant
soviet
1977
upper
step
wing
1970
youth
financial
reach
##ll
actor
numerous
##se
##st
nodded
arrived
##ation
minute
##nt
believed
sorry
complex
beautiful
victory
associated
temple
1968
1973
chance
perhaps
metal
##son
1945
bishop
##et
lee
launched
particularly
tree
le
retired
subject
prize
contains
yeah
theory
empire
##ce
suddenly
waiting
trust
recording
##to
happy
terms
camp
champion
1971
religious
pass
zealand
names
2nd
port
ancient
tom
corner
represented
watch
legal
anti
justice
cause
watched
brothers
45
material
changes
simply
response
louis
fast
##ting
answer
60
historical
1969
stories
straight
create
feature
increased
rate
administration
virginia
el
activities
cultural
overall
winner
programs
basketball
legs
guard
beyond
cast
doctor
mm
flight
results
remains
cost
effect
winter
##ble
larger
islands
problems
chairman
grew
commander
isn
1967
pay
failed
selected
hurt
fort
box
regiment
majority
journal
35
edward
plans
##ke
##ni
shown
pretty
irish
characters
directly
scene
likely
operated
allow
spring
##j
junior
matches
looks
mike
houses
fellow
##tion
beach
marriage
##ham
##ive
rules
oil
65
florida
expected
nearby
congress
sam
peace
recent
iii
wait
subsequently
cell
##do
variety
serving
agreed
please
poor
joe
pacific
attempt
wood
democratic
piece
prime
##ca
rural
mile
touch
appears
township
1964
1966
soldiers
##men
##ized
1965
pennsylvania
closer
fighting
claimed
score
jones
physical
editor
##ous
filled
genus
specific
sitting
super
mom
##va
therefore
supported
status
fear
cases
store
meaning
wales
minor
spain
tower
focus
vice
frank
follow
parish
separate
golden
horse
fifth
remaining
branch
32
presented
stared
##id
uses
secret
forms
##co
baseball
exactly
##ck
choice
note
discovered
travel
composed
truth
russia
ball
color
kiss
dad
wind
continue
ring
referred
numbers
digital
greater
##ns
metres
slightly
direct
increase
1960
responsible
crew
rule
trees
troops
##no
broke
goes
individuals
hundred
weight
creek
sleep
memory
defense
provides
ordered
code
value
jewish
windows
1944
safe
judge
whatever
corps
realized
growing
pre
##ga
cities
alexander
gaze
lies
spread
scott
letter
showed
situation
mayor
transport
watching
workers
extended
##li
expression
normal
##ment
chart
multiple
border
##ba
host
##ner
daily
mrs
walls
piano
##ko
heat
cannot
##ate
earned
products
drama
era
authority
seasons
join
grade
##io
sign
difficult
machine
1963
territory
mainly
##wood
stations
squadron
1962
stepped
iron
19th
##led
serve
appear
sky
speak
broken
charge
knowledge
kilometres
removed
ships
article
campus
simple
##ty
pushed
britain
##ve
leaves
recently
cd
soft
boston
latter
easy
acquired
poland
##sa
quality
officers
presence
planned
nations
mass
broadcast
jean
share
image
influence
wild
offer
emperor
electric
reading
headed
ability
promoted
yellow
ministry
1942
throat
smaller
politician
##by
latin
spoke
cars
williams
males
lack
pop
80
##ier
acting
seeing
consists
##ti
estate
1961
pressure
johnson
newspaper
jr
chris
olympics
online
conditions
beat
elements
walking
vote
##field
needs
carolina
text
featuring
global
block
shirt
levels
francisco
purpose
females
et
dutch
duke
ahead
gas
twice
safety
serious
turning
highly
lieutenant
firm
maria
amount
mixed
daniel
proposed
perfect
agreement
affairs
3rd
seconds
contemporary
paid
1943
prison
save
kitchen
label
administrative
intended
constructed
academic
nice
teacher
races
1956
formerly
corporation
ben
nation
issued
shut
1958
drums
housing
victoria
seems
opera
1959
graduated
function
von
mentioned
picked
build
recognized
shortly
protection
picture
notable
exchange
elections
1980s
loved
percent
racing
fish
elizabeth
garden
volume
hockey
1941
beside
settled
##ford
1940
competed
replied
drew
1948
actress
marine
scotland
steel
glanced
farm
steve
1957
risk
tonight
positive
magic
singles
effects
gray
screen
dog
##ja
residents
bus
sides
none
secondary
literature
polish
destroyed
flying
founder
households
1939
lay
reserve
usa
gallery
##ler
1946
industrial
younger
approach
appearances
urban
ones
1950
finish
avenue
powerful
fully
growth
page
honor
jersey
projects
advanced
revealed
basic
90
infantry
pair
equipment
visit
33
evening
search
grant
effort
solo
treatment
buried
republican
primarily
bottom
owner
1970s
israel
gives
jim
dream
bob
remain
spot
70
notes
produce
champions
contact
ed
soul
accepted
ways
del
##ally
losing
split
price
capacity
basis
trial
questions
##ina
1955
20th
guess
officially
memorial
naval
initial
##ization
whispered
median
engineer
##ful
sydney
##go
columbia
strength
300
1952
tears
senate
00
card
asian
agent
1947
software
44
draw
warm
supposed
com
pro
##il
transferred
leaned
##at
candidate
escape
mountains
asia
potential
activity
entertainment
seem
traffic
jackson
murder
36
slow
product
orchestra
haven
agency
bbc
taught
website
comedy
unable
storm
planning
albums
rugby
environment
scientific
grabbed
protect
##hi
boat
typically
1954
1953
damage
principal
divided
dedicated
mount
ohio
##berg
pick
fought
driver
##der
empty
shoulders
sort
thank
berlin
prominent
account
freedom
necessary
efforts
alex
headquarters
follows
alongside
des
simon
andrew
suggested
operating
learning
steps
1949
sweet
technical
begin
easily
34
teeth
speaking
settlement
scale
##sh
renamed
ray
max
enemy
semi
joint
compared
##rd
scottish
leadership
analysis
offers
georgia
pieces
captured
animal
deputy
guest
organized
##lin
tony
combined
method
challenge
1960s
huge
wants
battalion
sons
rise
crime
types
facilities
telling
path
1951
platform
sit
1990s
##lo
tells
assigned
rich
pull
##ot
commonly
alive
##za
letters
concept
conducted
wearing
happen
bought
becomes
holy
gets
ocean
defeat
languages
purchased
coffee
occurred
titled
##q
declared
applied
sciences
concert
sounds
jazz
brain
##me
painting
fleet
tax
nick
##ius
michigan
count
animals
leaders
episodes
##line
content
##den
birth
##it
clubs
64
palace
critical
refused
fair
leg
laughed
returning
surrounding
participated
formation
lifted
pointed
connected
rome
medicine
laid
taylor
santa
powers
adam
tall
shared
focused
knowing
yards
entrance
falls
##wa
calling
##ad
sources
chosen
beneath
resources
yard
##ite
nominated
silence
zone
defined
##que
gained
thirty
38
bodies
moon
##ard
adopted
christmas
widely
register
apart
iran
premier
serves
du
unknown
parties
##les
generation
##ff
continues
quick
fields
brigade
quiet
teaching
clothes
impact
weapons
partner
flat
theater
supreme
1938
37
relations
##tor
plants
suffered
1936
wilson
kids
begins
##age
1918
seats
armed
internet
models
worth
laws
400
communities
classes
background
knows
thanks
quarter
reaching
humans
carry
killing
format
kong
hong
setting
75
architecture
disease
railroad
inc
possibly
wish
arthur
thoughts
harry
doors
density
##di
crowd
illinois
stomach
tone
unique
reports
anyway
##ir
liberal
der
vehicle
thick
dry
drug
faced
largely
facility
theme
holds
creation
strange
colonel
##mi
revolution
bell
politics
turns
silent
rail
relief
independence
combat
shape
write
determined
sales
learned
4th
finger
oxford
providing
1937
heritage
fiction
situated
designated
allowing
distribution
hosted
##est
sight
interview
estimated
reduced
##ria
toronto
footballer
keeping
guys
damn
claim
motion
sport
sixth
stayed
##ze
en
rear
receive
handed
twelve
dress
audience
granted
brazil
##well
spirit
##ated
noticed
etc
olympic
representative
eric
tight
trouble
reviews
drink
vampire
missing
roles
ranked
newly
household
finals
wave
critics
##ee
phase
massachusetts
pilot
unlike
philadelphia
bright
guns
crown
organizations
roof
42
respectively
clearly
tongue
marked
circle
fox
korea
bronze
brian
expanded
sexual
supply
yourself
inspired
labour
fc
##ah
reference
vision
draft
connection
brand
reasons
1935
classic
driving
trip
jesus
cells
entry
1920
neither
trail
claims
atlantic
orders
labor
nose
afraid
identified
intelligence
calls
cancer
attacked
passing
stephen
positions
imperial
grey
jason
39
sunday
48
swedish
avoid
extra
uncle
message
covers
allows
surprise
materials
fame
hunter
##ji
1930
citizens
figures
davis
environmental
confirmed
shit
titles
di
performing
difference
acts
attacks
##ov
existing
votes
opportunity
nor
shop
entirely
trains
opposite
pakistan
##pa
develop
resulted
representatives
actions
reality
pressed
##ish
barely
wine
conversation
faculty
northwest
ends
documentary
nuclear
stock
grace
sets
eat
alternative
##ps
bag
resulting
creating
surprised
cemetery
1919
drop
finding
sarah
cricket
streets
tradition
ride
1933
exhibition
target
ear
explained
rain
composer
injury
apartment
municipal
educational
occupied
netherlands
clean
billion
constitution
learn
1914
maximum
classical
francis
lose
opposition
jose
ontario
bear
core
hills
rolled
ending
drawn
permanent
fun
##tes
##lla
lewis
sites
chamber
ryan
##way
scoring
height
1934
##house
lyrics
staring
55
officials
1917
snow
oldest
##tic
orange
##ger
qualified
interior
apparently
succeeded
thousand
dinner
lights
existence
fans
heavily
41
greatest
conservative
send
bowl
plus
enter
catch
##un
economy
duty
1929
speech
authorities
princess
performances
versions
shall
graduate
pictures
effective
remembered
poetry
desk
crossed
starring
starts
passenger
sharp
##ant
acres
ass
weather
falling
rank
fund
supporting
check
adult
publishing
heads
cm
southeast
lane
##burg
application
bc
##ura
les
condition
transfer
prevent
display
ex
regions
earl
federation
cool
relatively
answered
besides
1928
obtained
portion
##town
mix
##ding
reaction
liked
dean
express
peak
1932
##tte
counter
religion
chain
rare
miller
convention
aid
lie
vehicles
mobile
perform
squad
wonder
lying
crazy
sword
##ping
attempted
centuries
weren
philosophy
category
##ize
anna
interested
47
sweden
wolf
frequently
abandoned
kg
literary
alliance
task
entitled
##ay
threw
promotion
factory
tiny
soccer
visited
matt
fm
achieved
52
defence
internal
persian
43
methods
##ging
arrested
otherwise
cambridge
programming
villages
elementary
districts
rooms
criminal
conflict
worry
trained
1931
attempts
waited
signal
bird
truck
subsequent
programme
##ol
ad
49
communist
details
faith
sector
patrick
carrying
laugh
##ss
controlled
korean
showing
origin
fuel
evil
1927
##ent
brief
identity
darkness
address
pool
missed
publication
web
planet
ian
anne
wings
invited
##tt
briefly
standards
kissed
##be
ideas
climate
causing
walter
worse
albert
articles
winners
desire
aged
northeast
dangerous
gate
doubt
1922
wooden
multi
##ky
poet
rising
funding
46
communications
communication
violence
copies
prepared
ford
investigation
skills
1924
pulling
electronic
##ak
##ial
##han
containing
ultimately
offices
singing
understanding
restaurant
tomorrow
fashion
christ
ward
da
pope
stands
5th
flow
studios
aired
commissioned
contained
exist
fresh
americans
##per
wrestling
approved
kid
employed
respect
suit
1925
angel
asking
increasing
frame
angry
selling
1950s
thin
finds
##nd
temperature
statement
ali
explain
inhabitants
towns
extensive
narrow
51
jane
flowers
images
promise
somewhere
object
fly
closely
##ls
1912
bureau
cape
1926
weekly
presidential
legislative
1921
##ai
##au
launch
founding
##ny
978
##ring
artillery
strike
un
institutions
roll
writers
landing
chose
kevin
anymore
pp
##ut
attorney
fit
dan
billboard
receiving
agricultural
breaking
sought
dave
admitted
lands
mexican
##bury
charlie
specifically
hole
iv
howard
credit
moscow
roads
accident
1923
proved
wear
struck
hey
guards
stuff
slid
expansion
1915
cat
anthony
##kin
melbourne
opposed
sub
southwest
architect
failure
plane
1916
##ron
map
camera
tank
listen
regarding
wet
introduction
metropolitan
link
ep
fighter
inch
grown
gene
anger
fixed
buy
dvd
khan
domestic
worldwide
chapel
mill
functions
examples
##head
developing
1910
turkey
hits
pocket
antonio
papers
grow
unless
circuit
18th
concerned
attached
journalist
selection
journey
converted
provincial
painted
hearing
aren
bands
negative
aside
wondered
knight
lap
survey
ma
##ow
noise
billy
##ium
shooting
guide
bedroom
priest
resistance
motor
homes
sounded
giant
##mer
150
scenes
equal
comic
patients
hidden
solid
actual
bringing
afternoon
touched
funds
wedding
consisted
marie
canal
sr
kim
treaty
turkish
recognition
residence
cathedral
broad
knees
incident
shaped
fired
norwegian
handle
cheek
contest
represent
##pe
representing
beauty
##sen
birds
advantage
emergency
wrapped
drawing
notice
pink
broadcasting
##ong
somehow
bachelor
seventh
collected
registered
establishment
alan
assumed
chemical
personnel
roger
retirement
jeff
portuguese
wore
tied
device
threat
progress
advance
##ised
banks
hired
manchester
nfl
teachers
structures
forever
##bo
tennis
helping
saturday
sale
applications
junction
hip
incorporated
neighborhood
dressed
ceremony
##ds
influenced
hers
visual
stairs
decades
inner
kansas
hung
hoped
gain
scheduled
downtown
engaged
austria
clock
norway
certainly
pale
protected
1913
victor
employees
plate
putting
surrounded
##ists
finishing
blues
tropical
##ries
minnesota
consider
philippines
accept
54
retrieved
1900
concern
anderson
properties
institution
gordon
successfully
vietnam
##dy
backing
outstanding
muslim
crossing
folk
producing
usual
demand
occurs
observed
lawyer
educated
##ana
kelly
string
pleasure
budget
items
quietly
colorado
philip
typical
##worth
derived
600
survived
asks
mental
##ide
56
jake
jews
distinguished
ltd
1911
sri
extremely
53
athletic
loud
thousands
worried
shadow
transportation
horses
weapon
arena
importance
users
tim
objects
contributed
dragon
douglas
aware
senator
johnny
jordan
sisters
engines
flag
investment
samuel
shock
capable
clark
row
wheel
refers
session
familiar
biggest
wins
hate
maintained
drove
hamilton
request
expressed
injured
underground
churches
walker
wars
tunnel
passes
stupid
agriculture
softly
cabinet
regarded
joining
indiana
##ea
##ms
push
dates
spend
behavior
woods
protein
gently
chase
morgan
mention
burning
wake
combination
occur
mirror
leads
jimmy
indeed
impossible
singapore
paintings
covering
##nes
soldier
locations
attendance
sell
historian
wisconsin
invasion
argued
painter
diego
changing
egypt
##don
experienced
inches
##ku
missouri
vol
grounds
spoken
switzerland
##gan
reform
rolling
ha
forget
massive
resigned
burned
allen
tennessee
locked
values
improved
##mo
wounded
universe
sick
dating
facing
pack
purchase
user
##pur
moments
##ul
merged
anniversary
1908
coal
brick
understood
causes
dynasty
queensland
establish
stores
crisis
promote
hoping
views
cards
referee
extension
##si
raise
arizona
improve
colonial
formal
charged
##rt
palm
lucky
hide
rescue
faces
95
feelings
candidates
juan
##ell
goods
6th
courses
weekend
59
luke
cash
fallen
##om
delivered
affected
installed
carefully
tries
swiss
hollywood
costs
lincoln
responsibility
##he
shore
file
proper
normally
maryland
assistance
jump
constant
offering
friendly
waters
persons
realize
contain
trophy
800
partnership
factor
58
musicians
cry
bound
oregon
indicated
hero
houston
medium
##ure
consisting
somewhat
##ara
57
cycle
##che
beer
moore
frederick
gotten
eleven
worst
weak
approached
arranged
chin
loan
universal
bond
fifteen
pattern
disappeared
##ney
translated
##zed
lip
arab
capture
interests
insurance
##chi
shifted
cave
prix
warning
sections
courts
coat
plot
smell
feed
golf
favorite
maintain
knife
vs
voted
degrees
finance
quebec
opinion
translation
manner
ruled
operate
productions
choose
musician
discovery
confused
tired
separated
stream
techniques
committed
attend
ranking
kings
throw
passengers
measure
horror
fan
mining
sand
danger
salt
calm
decade
dam
require
runner
##ik
rush
associate
greece
##ker
rivers
consecutive
matthew
##ski
sighed
sq
documents
steam
edited
closing
tie
accused
1905
##ini
islamic
distributed
directors
organisation
bruce
7th
breathing
mad
lit
arrival
concrete
taste
08
composition
shaking
faster
amateur
adjacent
stating
1906
twin
flew
##ran
tokyo
publications
##tone
obviously
ridge
storage
1907
carl
pages
concluded
desert
driven
universities
ages
terminal
sequence
borough
250
constituency
creative
cousin
economics
dreams
margaret
notably
reduce
montreal
mode
17th
ears
saved
jan
vocal
##ica
1909
andy
##jo
riding
roughly
threatened
##ise
meters
meanwhile
landed
compete
repeated
grass
czech
regularly
charges
tea
sudden
appeal
##ung
solution
describes
pierre
classification
glad
parking
##ning
belt
physics
99
rachel
add
hungarian
participate
expedition
damaged
gift
childhood
85
fifty
##red
mathematics
jumped
letting
defensive
mph
##ux
##gh
testing
##hip
hundreds
shoot
owners
matters
smoke
israeli
kentucky
dancing
mounted
grandfather
emma
designs
profit
argentina
##gs
truly
li
lawrence
cole
begun
detroit
willing
branches
smiling
decide
miami
enjoyed
recordings
##dale
poverty
ethnic
gay
##bi
gary
arabic
09
accompanied
##one
##ons
fishing
determine
residential
acid
##ary
alice
returns
starred
mail
##ang
jonathan
strategy
##ue
net
forty
cook
businesses
equivalent
commonwealth
distinct
ill
##cy
seriously
##ors
##ped
shift
harris
replace
rio
imagine
formula
ensure
##ber
additionally
scheme
conservation
occasionally
purposes
feels
favor
##and
##ore
1930s
contrast
hanging
hunt
movies
1904
instruments
victims
danish
christopher
busy
demon
sugar
earliest
colony
studying
balance
duties
##ks
belgium
slipped
carter
05
visible
stages
iraq
fifa
##im
commune
forming
zero
07
continuing
talked
counties
legend
bathroom
option
tail
clay
daughters
afterwards
severe
jaw
visitors
##ded
devices
aviation
russell
kate
##vi
entering
subjects
##ino
temporary
swimming
forth
smooth
ghost
audio
bush
operates
rocks
movements
signs
eddie
##tz
ann
voices
honorary
06
memories
dallas
pure
measures
racial
promised
66
harvard
ceo
16th
parliamentary
indicate
benefit
flesh
dublin
louisiana
1902
1901
patient
sleeping
1903
membership
coastal
medieval
wanting
element
scholars
rice
62
limit
survive
makeup
rating
definitely
collaboration
obvious
##tan
boss
ms
baron
birthday
linked
soil
diocese
##lan
ncaa
##mann
offensive
shell
shouldn
waist
##tus
plain
ross
organ
resolution
manufacturing
adding
relative
kennedy
98
whilst
moth
marketing
gardens
crash
72
heading
partners
credited
carlos
moves
cable
##zi
marshall
##out
depending
bottle
represents
rejected
responded
existed
04
jobs
denmark
lock
##ating
treated
graham
routes
talent
commissioner
drugs
secure
tests
reign
restored
photography
##gi
contributions
oklahoma
designer
disc
grin
seattle
robin
paused
atlanta
unusual
##gate
praised
las
laughing
satellite
hungary
visiting
##sky
interesting
factors
deck
poems
norman
##water
stuck
speaker
rifle
domain
premiered
##her
dc
comics
actors
01
reputation
eliminated
8th
ceiling
prisoners
script
##nce
leather
austin
mississippi
rapidly
admiral
parallel
charlotte
guilty
tools
gender
divisions
fruit
##bs
laboratory
nelson
fantasy
marry
rapid
aunt
tribe
requirements
aspects
suicide
amongst
adams
bone
ukraine
abc
kick
sees
edinburgh
clothing
column
rough
gods
hunting
broadway
gathered
concerns
##ek
spending
ty
12th
snapped
requires
solar
bones
cavalry
##tta
iowa
drinking
waste
index
franklin
charity
thompson
stewart
tip
flash
landscape
friday
enjoy
singh
poem
listening
##back
eighth
fred
differences
adapted
bomb
ukrainian
surgery
corporate
masters
anywhere
##more
waves
odd
sean
portugal
orleans
dick
debate
kent
eating
puerto
cleared
96
expect
cinema
97
guitarist
blocks
electrical
agree
involving
depth
dying
panel
struggle
##ged
peninsula
adults
novels
emerged
vienna
metro
debuted
shoes
tamil
songwriter
meets
prove
beating
instance
heaven
scared
sending
marks
artistic
passage
superior
03
significantly
shopping
##tive
retained
##izing
malaysia
technique
cheeks
##ola
warren
maintenance
destroy
extreme
allied
120
appearing
##yn
fill
advice
alabama
qualifying
policies
cleveland
hat
battery
smart
authors
10th
soundtrack
acted
dated
lb
glance
equipped
coalition
funny
outer
ambassador
roy
possibility
couples
campbell
dna
loose
ethan
supplies
1898
gonna
88
monster
##res
shake
agents
frequency
springs
dogs
practices
61
gang
plastic
easier
suggests
gulf
blade
exposed
colors
industries
markets
pan
nervous
electoral
charts
legislation
ownership
##idae
mac
appointment
shield
copy
assault
socialist
abbey
monument
license
throne
employment
jay
93
replacement
charter
cloud
powered
suffering
accounts
oak
connecticut
strongly
wright
colour
crystal
13th
context
welsh
networks
voiced
gabriel
jerry
##cing
forehead
mp
##ens
manage
schedule
totally
remix
##ii
forests
occupation
print
nicholas
brazilian
strategic
vampires
engineers
76
roots
seek
correct
instrumental
und
alfred
backed
hop
##des
stanley
robinson
traveled
wayne
welcome
austrian
achieve
67
exit
rates
1899
strip
whereas
##cs
sing
deeply
adventure
bobby
rick
jamie
careful
components
cap
useful
personality
knee
##shi
pushing
hosts
02
protest
ca
ottoman
symphony
##sis
63
boundary
1890
processes
considering
considerable
tons
##work
##ft
##nia
cooper
trading
dear
conduct
91
illegal
apple
revolutionary
holiday
definition
harder
##van
jacob
circumstances
destruction
##lle
popularity
grip
classified
liverpool
donald
baltimore
flows
seeking
honour
approval
92
mechanical
till
happening
statue
critic
increasingly
immediate
describe
commerce
stare
##ster
indonesia
meat
rounds
boats
baker
orthodox
depression
formally
worn
naked
claire
muttered
sentence
11th
emily
document
77
criticism
wished
vessel
spiritual
bent
virgin
parker
minimum
murray
lunch
danny
printed
compilation
keyboards
false
blow
belonged
68
raising
78
cutting
##board
pittsburgh
##up
9th
shadows
81
hated
indigenous
jon
15th
barry
scholar
ah
##zer
oliver
##gy
stick
susan
meetings
attracted
spell
romantic
##ver
ye
1895
photo
demanded
customers
##ac
1896
logan
revival
keys
modified
commanded
jeans
##ious
upset
raw
phil
detective
hiding
resident
vincent
##bly
experiences
diamond
defeating
coverage
lucas
external
parks
franchise
helen
bible
successor
percussion
celebrated
il
lift
profile
clan
romania
##ied
mills
##su
nobody
achievement
shrugged
fault
1897
rhythm
initiative
breakfast
carbon
700
69
lasted
violent
74
wound
ken
killer
gradually
filmed
°c
dollars
processing
94
remove
criticized
guests
sang
chemistry
##vin
legislature
disney
##bridge
uniform
escaped
integrated
proposal
purple
denied
liquid
karl
influential
morris
nights
stones
intense
experimental
twisted
71
84
##ld
pace
nazi
mitchell
ny
blind
reporter
newspapers
14th
centers
burn
basin
forgotten
surviving
filed
collections
monastery
losses
manual
couch
description
appropriate
merely
tag
missions
sebastian
restoration
replacing
triple
73
elder
julia
warriors
benjamin
julian
convinced
stronger
amazing
declined
versus
merchant
happens
output
finland
bare
barbara
absence
ignored
dawn
injuries
##port
producers
##ram
82
luis
##ities
kw
admit
expensive
electricity
nba
exception
symbol
##ving
ladies
shower
sheriff
characteristics
##je
aimed
button
ratio
effectively
summit
angle
jury
bears
foster
vessels
pants
executed
evans
dozen
advertising
kicked
patrol
1889
competitions
lifetime
principles
athletics
##logy
birmingham
sponsored
89
rob
nomination
1893
acoustic
##sm
creature
longest
##tra
credits
harbor
dust
josh
##so
territories
milk
infrastructure
completion
thailand
indians
leon
archbishop
##sy
assist
pitch
blake
arrangement
girlfriend
serbian
operational
hence
sad
scent
fur
dj
sessions
hp
refer
rarely
##ora
exists
1892
##ten
scientists
dirty
penalty
burst
portrait
seed
79
pole
limits
rival
1894
stable
alpha
grave
constitutional
alcohol
arrest
flower
mystery
devil
architectural
relationships
greatly
habitat
##istic
larry
progressive
remote
cotton
##ics
##ok
preserved
reaches
##ming
cited
86
vast
scholarship
decisions
cbs
joy
teach
1885
editions
knocked
eve
searching
partly
participation
gap
animated
fate
excellent
##ett
na
87
alternate
saints
youngest
##ily
climbed
##ita
##tors
suggest
##ct
discussion
staying
choir
lakes
jacket
revenue
nevertheless
peaked
instrument
wondering
annually
managing
neil
1891
signing
terry
##ice
apply
clinical
brooklyn
aim
catherine
fuck
farmers
figured
ninth
pride
hugh
evolution
ordinary
involvement
comfortable
shouted
tech
encouraged
taiwan
representation
sharing
##lia
##em
panic
exact
cargo
competing
fat
cried
83
1920s
occasions
pa
cabin
borders
utah
marcus
##isation
badly
muscles
##ance
victorian
transition
warner
bet
permission
##rin
slave
terrible
similarly
shares
seth
uefa
possession
medals
benefits
colleges
lowered
perfectly
mall
transit
##ye
##kar
publisher
##ened
harrison
deaths
elevation
##ae
asleep
machines
sigh
ash
hardly
argument
occasion
parent
leo
decline
1888
contribution
##ua
concentration
1000
opportunities
hispanic
guardian
extent
emotions
hips
mason
volumes
bloody
controversy
diameter
steady
mistake
phoenix
identify
violin
##sk
departure
richmond
spin
funeral
enemies
1864
gear
literally
connor
random
sergeant
grab
confusion
1865
transmission
informed
op
leaning
sacred
suspended
thinks
gates
portland
luck
agencies
yours
hull
expert
muscle
layer
practical
sculpture
jerusalem
latest
lloyd
statistics
deeper
recommended
warrior
arkansas
mess
supports
greg
eagle
1880
recovered
rated
concerts
rushed
##ano
stops
eggs
files
premiere
keith
##vo
delhi
turner
pit
affair
belief
paint
##zing
mate
##ach
##ev
victim
##ology
withdrew
bonus
styles
fled
##ud
glasgow
technologies
funded
nbc
adaptation
##ata
portrayed
cooperation
supporters
judges
bernard
justin
hallway
ralph
##ick
graduating
controversial
distant
continental
spider
bite
##ho
recognize
intention
mixing
##ese
egyptian
bow
tourism
suppose
claiming
tiger
dominated
participants
vi
##ru
nurse
partially
tape
##rum
psychology
##rn
essential
touring
duo
voting
civilian
emotional
channels
##king
apparent
hebrew
1887
tommy
carrier
intersection
beast
hudson
##gar
##zo
lab
nova
bench
discuss
costa
##ered
detailed
behalf
drivers
unfortunately
obtain
##lis
rocky
##dae
siege
friendship
honey
##rian
1861
amy
hang
posted
governments
collins
respond
wildlife
preferred
operator
##po
laura
pregnant
videos
dennis
suspected
boots
instantly
weird
automatic
businessman
alleged
placing
throwing
ph
mood
1862
perry
venue
jet
remainder
##lli
##ci
passion
biological
boyfriend
1863
dirt
buffalo
ron
segment
fa
abuse
##era
genre
thrown
stroke
colored
stress
exercise
displayed
##gen
struggled
##tti
abroad
dramatic
wonderful
thereafter
madrid
component
widespread
##sed
tale
citizen
todd
monday
1886
vancouver
overseas
forcing
crying
descent
##ris
discussed
substantial
ranks
regime
1870
provinces
switch
drum
zane
ted
tribes
proof
lp
cream
researchers
volunteer
manor
silk
milan
donated
allies
venture
principle
delivery
enterprise
##ves
##ans
bars
traditionally
witch
reminded
copper
##uk
pete
inter
links
colin
grinned
elsewhere
competitive
frequent
##oy
scream
##hu
tension
texts
submarine
finnish
defending
defend
pat
detail
1884
affiliated
stuart
themes
villa
periods
tool
belgian
ruling
crimes
answers
folded
licensed
resort
demolished
hans
lucy
1881
lion
traded
photographs
writes
craig
##fa
trials
generated
beth
noble
debt
percentage
yorkshire
erected
ss
viewed
grades
confidence
ceased
islam
telephone
retail
##ible
chile
m²
roberts
sixteen
##ich
commented
hampshire
innocent
dual
pounds
checked
regulations
afghanistan
sung
rico
liberty
assets
bigger
options
angels
relegated
tribute
wells
attending
leaf
##yan
butler
romanian
forum
monthly
lisa
patterns
gmina
##tory
madison
hurricane
rev
##ians
bristol
##ula
elite
valuable
disaster
democracy
awareness
germans
freyja
##ins
loop
absolutely
paying
populations
maine
sole
prayer
spencer
releases
doorway
bull
##ani
lover
midnight
conclusion
##sson
thirteen
lily
mediterranean
##lt
nhl
proud
sample
##hill
drummer
guinea
##ova
murphy
climb
##ston
instant
attributed
horn
ain
railways
steven
##ao
autumn
ferry
opponent
root
traveling
secured
corridor
stretched
tales
sheet
trinity
cattle
helps
indicates
manhattan
murdered
fitted
1882
gentle
grandmother
mines
shocked
vegas
produces
##light
caribbean
##ou
belong
continuous
desperate
drunk
historically
trio
waved
raf
dealing
nathan
bat
murmured
interrupted
residing
scientist
pioneer
harold
aaron
##net
delta
attempting
minority
mini
believes
chorus
tend
lots
eyed
indoor
load
shots
updated
jail
##llo
concerning
connecting
wealth
##ved
slaves
arrive
rangers
sufficient
rebuilt
##wick
cardinal
flood
muhammad
whenever
relation
runners
moral
repair
viewers
arriving
revenge
punk
assisted
bath
fairly
breathe
lists
innings
illustrated
whisper
nearest
voters
clinton
ties
ultimate
screamed
beijing
lions
andre
fictional
gathering
comfort
radar
suitable
dismissed
hms
ban
pine
wrist
atmosphere
voivodeship
bid
timber
##ned
##nan
giants
##ane
cameron
recovery
uss
identical
categories
switched
serbia
laughter
noah
ensemble
therapy
peoples
touching
##off
locally
pearl
platforms
everywhere
ballet
tables
lanka
herbert
outdoor
toured
derek
1883
spaces
contested
swept
1878
exclusive
slight
connections
##dra
winds
prisoner
collective
bangladesh
tube
publicly
wealthy
thai
##ys
isolated
select
##ric
insisted
pen
fortune
ticket
spotted
reportedly
animation
enforcement
tanks
110
decides
wider
lowest
owen
##time
nod
hitting
##hn
gregory
furthermore
magazines
fighters
solutions
##ery
pointing
requested
peru
reed
chancellor
knights
mask
worker
eldest
flames
reduction
1860
volunteers
##tis
reporting
##hl
wire
advisory
endemic
origins
settlers
pursue
knock
consumer
1876
eu
compound
creatures
mansion
sentenced
ivan
deployed
guitars
frowned
involves
mechanism
kilometers
perspective
shops
maps
terminus
duncan
alien
fist
bridges
##pers
heroes
fed
derby
swallowed
##ros
patent
sara
illness
characterized
adventures
slide
hawaii
jurisdiction
##op
organised
##side
adelaide
walks
biology
se
##ties
rogers
swing
tightly
boundaries
##rie
prepare
implementation
stolen
##sha
certified
colombia
edwards
garage
##mm
recalled
##ball
rage
harm
nigeria
breast
##ren
furniture
pupils
settle
##lus
cuba
balls
client
alaska
21st
linear
thrust
celebration
latino
genetic
terror
##cia
##ening
lightning
fee
witness
lodge
establishing
skull
##ique
earning
hood
##ei
rebellion
wang
sporting
warned
missile
devoted
activist
porch
worship
fourteen
package
1871
decorated
##shire
housed
##ock
chess
sailed
doctors
oscar
joan
treat
garcia
harbour
jeremy
##ire
traditions
dominant
jacques
##gon
##wan
relocated
1879
amendment
sized
companion
simultaneously
volleyball
spun
acre
increases
stopping
loves
belongs
affect
drafted
tossed
scout
battles
1875
filming
shoved
munich
tenure
vertical
romance
pc
##cher
argue
##ical
craft
ranging
www
opens
honest
tyler
yesterday
virtual
##let
muslims
reveal
snake
immigrants
radical
screaming
speakers
firing
saving
belonging
ease
lighting
prefecture
blame
farmer
hungry
grows
rubbed
beam
sur
subsidiary
##cha
armenian
sao
dropping
conventional
##fer
microsoft
reply
qualify
spots
1867
sweat
festivals
##ken
immigration
physician
discover
exposure
sandy
explanation
isaac
implemented
##fish
hart
initiated
connect
stakes
presents
heights
householder
pleased
tourist
regardless
slip
closest
##ction
surely
sultan
brings
riley
preparation
aboard
slammed
baptist
experiment
ongoing
interstate
organic
playoffs
##ika
1877
130
##tar
hindu
error
tours
tier
plenty
arrangements
talks
trapped
excited
sank
ho
athens
1872
denver
welfare
suburb
athletes
trick
diverse
belly
exclusively
yelled
1868
##med
conversion
##ette
1874
internationally
computers
conductor
abilities
sensitive
hello
dispute
measured
globe
rocket
prices
amsterdam
flights
tigers
inn
municipalities
emotion
references
3d
##mus
explains
airlines
manufactured
pm
archaeological
1873
interpretation
devon
comment
##ites
settlements
kissing
absolute
improvement
suite
impressed
barcelona
sullivan
jefferson
towers
jesse
julie
##tin
##lu
grandson
hi
gauge
regard
rings
interviews
trace
raymond
thumb
departments
burns
serial
bulgarian
scores
demonstrated
##ix
1866
kyle
alberta
underneath
romanized
##ward
relieved
acquisition
phrase
cliff
reveals
han
cuts
merger
custom
##dar
nee
gilbert
graduation
##nts
assessment
cafe
difficulty
demands
swung
democrat
jennifer
commons
1940s
grove
##yo
completing
focuses
sum
substitute
bearing
stretch
reception
##py
reflected
essentially
destination
pairs
##ched
survival
resource
##bach
promoting
doubles
messages
tear
##down
##fully
parade
florence
harvey
incumbent
partial
framework
900
pedro
frozen
procedure
olivia
controls
##mic
shelter
personally
temperatures
##od
brisbane
tested
sits
marble
comprehensive
oxygen
leonard
##kov
inaugural
iranian
referring
quarters
attitude
##ivity
mainstream
lined
mars
dakota
norfolk
unsuccessful
##°
explosion
helicopter
congressional
##sing
inspector
bitch
seal
departed
divine
##ters
coaching
examination
punishment
manufacturer
sink
columns
unincorporated
signals
nevada
squeezed
dylan
dining
photos
martial
manuel
eighteen
elevator
brushed
plates
ministers
ivy
congregation
##len
slept
specialized
taxes
curve
restricted
negotiations
likes
statistical
arnold
inspiration
execution
bold
intermediate
significance
margin
ruler
wheels
gothic
intellectual
dependent
listened
eligible
buses
widow
syria
earn
cincinnati
collapsed
recipient
secrets
accessible
philippine
maritime
goddess
clerk
surrender
breaks
playoff
database
##ified
##lon
ideal
beetle
aspect
soap
regulation
strings
expand
anglo
shorter
crosses
retreat
tough
coins
wallace
directions
pressing
##oon
shipping
locomotives
comparison
topics
nephew
##mes
distinction
honors
travelled
sierra
ibn
##over
fortress
sa
recognised
carved
1869
clients
##dan
intent
##mar
coaches
describing
bread
##ington
beaten
northwestern
##ona
merit
youtube
collapse
challenges
em
historians
objective
submitted
virus
attacking
drake
assume
##ere
diseases
marc
stem
leeds
##cus
##ab
farming
glasses
##lock
visits
nowhere
fellowship
relevant
carries
restaurants
experiments
101
constantly
bases
targets
shah
tenth
opponents
verse
territorial
##ira
writings
corruption
##hs
instruction
inherited
reverse
emphasis
##vic
employee
arch
keeps
rabbi
watson
payment
uh
##ala
nancy
##tre
venice
fastest
sexy
banned
adrian
properly
ruth
touchdown
dollar
boards
metre
circles
edges
favour
comments
ok
travels
liberation
scattered
firmly
##ular
holland
permitted
diesel
kenya
den
originated
##ral
demons
resumed
dragged
rider
##rus
servant
blinked
extend
torn
##ias
##sey
input
meal
everybody
cylinder
kinds
camps
##fe
bullet
logic
##wn
croatian
evolved
healthy
fool
chocolate
wise
preserve
pradesh
##ess
respective
1850
##ew
chicken
artificial
gross
corresponding
convicted
cage
caroline
dialogue
##dor
narrative
stranger
mario
br
christianity
failing
trent
commanding
buddhist
1848
maurice
focusing
yale
bike
altitude
##ering
mouse
revised
##sley
veteran
##ig
pulls
theology
crashed
campaigns
legion
##ability
drag
excellence
customer
cancelled
intensity
excuse
##lar
liga
participating
contributing
printing
##burn
variable
##rk
curious
bin
legacy
renaissance
##my
symptoms
binding
vocalist
dancer
##nie
grammar
gospel
democrats
ya
enters
sc
diplomatic
hitler
##ser
clouds
mathematical
quit
defended
oriented
##heim
fundamental
hardware
impressive
equally
convince
confederate
guilt
chuck
sliding
##ware
magnetic
narrowed
petersburg
bulgaria
otto
phd
skill
##ama
reader
hopes
pitcher
reservoir
hearts
automatically
expecting
mysterious
bennett
extensively
imagined
seeds
monitor
fix
##ative
journalism
struggling
signature
ranch
encounter
photographer
observation
protests
##pin
influences
##hr
calendar
##all
cruz
croatia
locomotive
hughes
naturally
shakespeare
basement
hook
uncredited
faded
theories
approaches
dare
phillips
filling
fury
obama
##ain
efficient
arc
deliver
min
raid
breeding
inducted
leagues
efficiency
axis
montana
eagles
##ked
supplied
instructions
karen
picking
indicating
trap
anchor
practically
christians
tomb
vary
occasional
electronics
lords
readers
newcastle
faint
innovation
collect
situations
engagement
160
claude
mixture
##feld
peer
tissue
logo
lean
##ration
°f
floors
##ven
architects
reducing
##our
##ments
rope
1859
ottawa
##har
samples
banking
declaration
proteins
resignation
francois
saudi
advocate
exhibited
armor
twins
divorce
##ras
abraham
reviewed
jo
temporarily
matrix
physically
pulse
curled
##ena
difficulties
bengal
usage
##ban
annie
riders
certificate
##pi
holes
warsaw
distinctive
jessica
##mon
mutual
1857
customs
circular
eugene
removal
loaded
mere
vulnerable
depicted
generations
dame
heir
enormous
lightly
climbing
pitched
lessons
pilots
nepal
ram
google
preparing
brad
louise
renowned
##₂
liam
##ably
plaza
shaw
sophie
brilliant
bills
##bar
##nik
fucking
mainland
server
pleasant
seized
veterans
jerked
fail
beta
brush
radiation
stored
warmth
southeastern
nate
sin
raced
berkeley
joke
athlete
designation
trunk
##low
roland
qualification
archives
heels
artwork
receives
judicial
reserves
##bed
woke
installation
abu
floating
fake
lesser
excitement
interface
concentrated
addressed
characteristic
amanda
saxophone
monk
auto
##bus
releasing
egg
dies
interaction
defender
ce
outbreak
glory
loving
##bert
sequel
consciousness
http
awake
ski
enrolled
##ress
handling
rookie
brow
somebody
biography
warfare
amounts
contracts
presentation
fabric
dissolved
challenged
meter
psychological
lt
elevated
rally
accurate
##tha
hospitals
undergraduate
specialist
venezuela
exhibit
shed
nursing
protestant
fluid
structural
footage
jared
consistent
prey
##ska
succession
reflect
exile
lebanon
wiped
suspect
shanghai
resting
integration
preservation
marvel
variant
pirates
sheep
rounded
capita
sailing
colonies
manuscript
deemed
variations
clarke
functional
emerging
boxing
relaxed
curse
azerbaijan
heavyweight
nickname
editorial
rang
grid
tightened
earthquake
flashed
miguel
rushing
##ches
improvements
boxes
brooks
180
consumption
molecular
felix
societies
repeatedly
variation
aids
civic
graphics
professionals
realm
autonomous
receiver
delayed
workshop
militia
chairs
trump
canyon
##point
harsh
extending
lovely
happiness
##jan
stake
eyebrows
embassy
wellington
hannah
##ella
sony
corners
bishops
swear
cloth
contents
xi
namely
commenced
1854
stanford
nashville
courage
graphic
commitment
garrison
##bin
hamlet
clearing
rebels
attraction
literacy
cooking
ruins
temples
jenny
humanity
celebrate
hasn
freight
sixty
rebel
bastard
##art
newton
##ada
deer
##ges
##ching
smiles
delaware
singers
##ets
approaching
assists
flame
##ph
boulevard
barrel
planted
##ome
pursuit
##sia
consequences
posts
shallow
invitation
rode
depot
ernest
kane
rod
concepts
preston
topic
chambers
striking
blast
arrives
descendants
montgomery
ranges
worlds
##lay
##ari
span
chaos
praise
##ag
fewer
1855
sanctuary
mud
fbi
##ions
programmes
maintaining
unity
harper
bore
handsome
closure
tournaments
thunder
nebraska
linda
facade
puts
satisfied
argentine
dale
cork
dome
panama
##yl
1858
tasks
experts
##ates
feeding
equation
##las
##ida
##tu
engage
bryan
##ax
um
quartet
melody
disbanded
sheffield
blocked
gasped
delay
kisses
maggie
connects
##non
sts
poured
creator
publishers
##we
guided
ellis
extinct
hug
gaining
##ord
complicated
##bility
poll
clenched
investigate
##use
thereby
quantum
spine
cdp
humor
kills
administered
semifinals
##du
encountered
ignore
##bu
commentary
##maker
bother
roosevelt
140
plains
halfway
flowing
cultures
crack
imprisoned
neighboring
airline
##ses
##view
##mate
##ec
gather
wolves
marathon
transformed
##ill
cruise
organisations
carol
punch
exhibitions
numbered
alarm
ratings
daddy
silently
##stein
queens
colours
impression
guidance
liu
tactical
##rat
marshal
della
arrow
##ings
rested
feared
tender
owns
bitter
advisor
escort
##ides
spare
farms
grants
##ene
dragons
encourage
colleagues
cameras
##und
sucked
pile
spirits
prague
statements
suspension
landmark
fence
torture
recreation
bags
permanently
survivors
pond
spy
predecessor
bombing
coup
##og
protecting
transformation
glow
##lands
##book
dug
priests
andrea
feat
barn
jumping
##chen
##ologist
##con
casualties
stern
auckland
pipe
serie
revealing
ba
##bel
trevor
mercy
spectrum
yang
consist
governing
collaborated
possessed
epic
comprises
blew
shane
##ack
lopez
honored
magical
sacrifice
judgment
perceived
hammer
mtv
baronet
tune
das
missionary
sheets
350
neutral
oral
threatening
attractive
shade
aims
seminary
##master
estates
1856
michel
wounds
refugees
manufacturers
##nic
mercury
syndrome
porter
##iya
##din
hamburg
identification
upstairs
purse
widened
pause
cared
breathed
affiliate
santiago
prevented
celtic
fisher
125
recruited
byzantine
reconstruction
farther
##mp
diet
sake
au
spite
sensation
##ert
blank
separation
105
##hon
vladimir
armies
anime
##lie
accommodate
orbit
cult
sofia
archive
##ify
##box
founders
sustained
disorder
honours
northeastern
mia
crops
violet
threats
blanket
fires
canton
followers
southwestern
prototype
voyage
assignment
altered
moderate
protocol
pistol
##eo
questioned
brass
lifting
1852
math
authored
##ual
doug
dimensional
dynamic
##san
1851
pronounced
grateful
quest
uncomfortable
boom
presidency
stevens
relating
politicians
chen
barrier
quinn
diana
mosque
tribal
cheese
palmer
portions
sometime
chester
treasure
wu
bend
download
millions
reforms
registration
##osa
consequently
monitoring
ate
preliminary
brandon
invented
ps
eaten
exterior
intervention
ports
documented
log
displays
lecture
sally
favourite
##itz
vermont
lo
invisible
isle
breed
##ator
journalists
relay
speaks
backward
explore
midfielder
actively
stefan
procedures
cannon
blond
kenneth
centered
servants
chains
libraries
malcolm
essex
henri
slavery
##hal
facts
fairy
coached
cassie
cats
washed
cop
##fi
announcement
item
2000s
vinyl
activated
marco
frontier
growled
curriculum
##das
loyal
accomplished
leslie
ritual
kenny
##00
vii
napoleon
hollow
hybrid
jungle
stationed
friedrich
counted
##ulated
platinum
theatrical
seated
col
rubber
glen
1840
diversity
healing
extends
id
provisions
administrator
columbus
##oe
tributary
te
assured
org
##uous
prestigious
examined
lectures
grammy
ronald
associations
bailey
allan
essays
flute
believing
consultant
proceedings
travelling
1853
kit
kerala
yugoslavia
buddy
methodist
##ith
burial
centres
batman
##nda
discontinued
bo
dock
stockholm
lungs
severely
##nk
citing
manga
##ugh
steal
mumbai
iraqi
robot
celebrity
bride
broadcasts
abolished
pot
joel
overhead
franz
packed
reconnaissance
johann
acknowledged
introduce
handled
doctorate
developments
drinks
alley
palestine
##nis
##aki
proceeded
recover
bradley
grain
patch
afford
infection
nationalist
legendary
##ath
interchange
virtually
gen
gravity
exploration
amber
vital
wishes
powell
doctrine
elbow
screenplay
##bird
contribute
indonesian
pet
creates
##com
enzyme
kylie
discipline
drops
manila
hunger
##ien
layers
suffer
fever
bits
monica
keyboard
manages
##hood
searched
appeals
##bad
testament
grande
reid
##war
beliefs
congo
##ification
##dia
si
requiring
##via
casey
1849
regret
streak
rape
depends
syrian
sprint
pound
tourists
upcoming
pub
##xi
tense
##els
practiced
echo
nationwide
guild
motorcycle
liz
##zar
chiefs
desired
elena
bye
precious
absorbed
relatives
booth
pianist
##mal
citizenship
exhausted
wilhelm
##ceae
##hed
noting
quarterback
urge
hectares
##gue
ace
holly
##tal
blonde
davies
parked
sustainable
stepping
twentieth
airfield
galaxy
nest
chip
##nell
tan
shaft
paulo
requirement
##zy
paradise
tobacco
trans
renewed
vietnamese
##cker
##ju
suggesting
catching
holmes
enjoying
md
trips
colt
holder
butterfly
nerve
reformed
cherry
bowling
trailer
carriage
goodbye
appreciate
toy
joshua
interactive
enabled
involve
##kan
collar
determination
bunch
facebook
recall
shorts
superintendent
episcopal
frustration
giovanni
nineteenth
laser
privately
array
circulation
##ovic
armstrong
deals
painful
permit
discrimination
##wi
aires
retiring
cottage
ni
##sta
horizon
ellen
jamaica
ripped
fernando
chapters
playstation
patron
lecturer
navigation
behaviour
genes
georgian
export
solomon
rivals
swift
seventeen
rodriguez
princeton
independently
sox
1847
arguing
entity
casting
hank
criteria
oakland
geographic
milwaukee
reflection
expanding
conquest
dubbed
##tv
halt
brave
brunswick
doi
arched
curtis
divorced
predominantly
somerset
streams
ugly
zoo
horrible
curved
buenos
fierce
dictionary
vector
theological
unions
handful
stability
chan
punjab
segments
##lly
altar
ignoring
gesture
monsters
pastor
##stone
thighs
unexpected
operators
abruptly
coin
compiled
associates
improving
migration
pin
##ose
compact
collegiate
reserved
##urs
quarterfinals
roster
restore
assembled
hurry
oval
##cies
1846
flags
martha
##del
victories
sharply
##rated
argues
deadly
neo
drawings
symbols
performer
##iel
griffin
restrictions
editing
andrews
java
journals
arabia
compositions
dee
pierce
removing
hindi
casino
runway
civilians
minds
nasa
hotels
##zation
refuge
rent
retain
potentially
conferences
suburban
conducting
##tto
##tions
##tle
descended
massacre
##cal
ammunition
terrain
fork
souls
counts
chelsea
durham
drives
cab
##bank
perth
realizing
palestinian
finn
simpson
##dal
betty
##ule
moreover
particles
cardinals
tent
evaluation
extraordinary
##oid
inscription
##works
wednesday
chloe
maintains
panels
ashley
trucks
##nation
cluster
sunlight
strikes
zhang
##wing
dialect
canon
##ap
tucked
##ws
collecting
##mas
##can
##sville
maker
quoted
evan
franco
aria
buying
cleaning
eva
closet
provision
apollo
clinic
rat
##ez
necessarily
ac
##gle
##ising
venues
flipped
cent
spreading
trustees
checking
authorized
##sco
disappointed
##ado
notion
duration
trumpet
hesitated
topped
brussels
rolls
theoretical
hint
define
aggressive
repeat
wash
peaceful
optical
width
allegedly
mcdonald
strict
copyright
##illa
investors
mar
jam
witnesses
sounding
miranda
michelle
privacy
hugo
harmony
##pp
valid
lynn
glared
nina
102
headquartered
diving
boarding
gibson
##ncy
albanian
marsh
routine
dealt
enhanced
er
intelligent
substance
targeted
enlisted
discovers
spinning
observations
pissed
smoking
rebecca
capitol
visa
varied
costume
seemingly
indies
compensation
surgeon
thursday
arsenal
westminster
suburbs
rid
anglican
##ridge
knots
foods
alumni
lighter
fraser
whoever
portal
scandal
##ray
gavin
advised
instructor
flooding
terrorist
##ale
teenage
interim
senses
duck
teen
thesis
abby
eager
overcome
##ile
newport
glenn
rises
shame
##cc
prompted
priority
forgot
bomber
nicolas
protective
360
cartoon
katherine
breeze
lonely
trusted
henderson
richardson
relax
banner
candy
palms
remarkable
##rio
legends
cricketer
essay
ordained
edmund
rifles
trigger
##uri
##away
sail
alert
1830
audiences
penn
sussex
siblings
pursued
indianapolis
resist
rosa
consequence
succeed
avoided
1845
##ulation
inland
##tie
##nna
counsel
profession
chronicle
hurried
##una
eyebrow
eventual
bleeding
innovative
cure
##dom
committees
accounting
con
scope
hardy
heather
tenor
gut
herald
codes
tore
scales
wagon
##oo
luxury
tin
prefer
fountain
triangle
bonds
darling
convoy
dried
traced
beings
troy
accidentally
slam
findings
smelled
joey
lawyers
outcome
steep
bosnia
configuration
shifting
toll
brook
performers
lobby
philosophical
construct
shrine
aggregate
boot
cox
phenomenon
savage
insane
solely
reynolds
lifestyle
##ima
nationally
holdings
consideration
enable
edgar
mo
mama
##tein
fights
relegation
chances
atomic
hub
conjunction
awkward
reactions
currency
finale
kumar
underwent
steering
elaborate
gifts
comprising
melissa
veins
reasonable
sunshine
chi
solve
trails
inhabited
elimination
ethics
huh
ana
molly
consent
apartments
layout
marines
##ces
hunters
bulk
##oma
hometown
##wall
##mont
cracked
reads
neighbouring
withdrawn
admission
wingspan
damned
anthology
lancashire
brands
batting
forgive
cuban
awful
##lyn
104
dimensions
imagination
##ade
dante
##ship
tracking
desperately
goalkeeper
##yne
groaned
workshops
confident
burton
gerald
milton
circus
uncertain
slope
copenhagen
sophia
fog
philosopher
portraits
accent
cycling
varying
gripped
larvae
garrett
specified
scotia
mature
luther
kurt
rap
##kes
aerial
750
ferdinand
heated
es
transported
##shan
safely
nonetheless
##orn
##gal
motors
demanding
##sburg
startled
##brook
ally
generate
caps
ghana
stained
demo
mentions
beds
ap
afterward
diary
##bling
utility
##iro
richards
1837
conspiracy
conscious
shining
footsteps
observer
cyprus
urged
loyalty
developer
probability
olive
upgraded
gym
miracle
insects
graves
1844
ourselves
hydrogen
amazon
katie
tickets
poets
##pm
planes
##pan
prevention
witnessed
dense
jin
randy
tang
warehouse
monroe
bang
archived
elderly
investigations
alec
granite
mineral
conflicts
controlling
aboriginal
carlo
##zu
mechanics
stan
stark
rhode
skirt
est
##berry
bombs
respected
##horn
imposed
limestone
deny
nominee
memphis
grabbing
disabled
##als
amusement
aa
frankfurt
corn
referendum
varies
slowed
disk
firms
unconscious
incredible
clue
sue
##zhou
twist
##cio
joins
idaho
chad
developers
computing
destroyer
103
mortal
tucker
kingston
choices
yu
carson
1800
os
whitney
geneva
pretend
dimension
staged
plateau
maya
##une
freestyle
##bc
rovers
hiv
##ids
tristan
classroom
prospect
##hus
honestly
diploma
lied
thermal
auxiliary
feast
unlikely
iata
##tel
morocco
pounding
treasury
lithuania
considerably
1841
dish
1812
geological
matching
stumbled
destroying
marched
brien
advances
cake
nicole
belle
settling
measuring
directing
##mie
tuesday
bassist
capabilities
stunned
fraud
torpedo
##list
##phone
anton
wisdom
surveillance
ruined
##ulate
lawsuit
healthcare
theorem
halls
trend
aka
horizontal
dozens
acquire
lasting
swim
hawk
gorgeous
fees
vicinity
decrease
adoption
tactics
##ography
pakistani
##ole
draws
##hall
willie
burke
heath
algorithm
integral
powder
elliott
brigadier
jackie
tate
varieties
darker
##cho
lately
cigarette
specimens
adds
##ree
##ensis
##inger
exploded
finalist
cia
murders
wilderness
arguments
nicknamed
acceptance
onwards
manufacture
robertson
jets
tampa
enterprises
blog
loudly
composers
nominations
1838
ai
malta
inquiry
automobile
hosting
viii
rays
tilted
grief
museums
strategies
furious
euro
equality
cohen
poison
surrey
wireless
governed
ridiculous
moses
##esh
##room
vanished
##ito
barnes
attract
morrison
istanbul
##iness
absent
rotation
petition
janet
##logical
satisfaction
custody
deliberately
observatory
comedian
surfaces
pinyin
novelist
strictly
canterbury
oslo
monks
embrace
ibm
jealous
photograph
continent
dorothy
marina
doc
excess
holden
allegations
explaining
stack
avoiding
lance
storyline
majesty
poorly
spike
dos
bradford
raven
travis
classics
proven
voltage
pillow
fists
butt
1842
interpreted
##car
1839
gage
telegraph
lens
promising
expelled
casual
collector
zones
##min
silly
nintendo
##kh
##bra
downstairs
chef
suspicious
afl
flies
vacant
uganda
pregnancy
condemned
lutheran
estimates
cheap
decree
saxon
proximity
stripped
idiot
deposits
contrary
presenter
magnus
glacier
im
offense
edwin
##ori
upright
##long
bolt
##ois
toss
geographical
##izes
environments
delicate
marking
abstract
xavier
nails
windsor
plantation
occurring
equity
saskatchewan
fears
drifted
sequences
vegetation
revolt
##stic
1843
sooner
fusion
opposing
nato
skating
1836
secretly
ruin
lease
##oc
edit
##nne
flora
anxiety
ruby
##ological
##mia
tel
bout
taxi
emmy
frost
rainbow
compounds
foundations
rainfall
assassination
nightmare
dominican
##win
achievements
deserve
orlando
intact
armenia
##nte
calgary
valentine
106
marion
proclaimed
theodore
bells
courtyard
thigh
gonzalez
console
troop
minimal
monte
everyday
##ence
##if
supporter
terrorism
buck
openly
presbyterian
activists
carpet
##iers
rubbing
uprising
##yi
cute
conceived
legally
##cht
millennium
cello
velocity
ji
rescued
cardiff
1835
rex
concentrate
senators
beard
rendered
glowing
battalions
scouts
competitors
sculptor
catalogue
arctic
ion
raja
bicycle
wow
glancing
lawn
##woman
gentleman
lighthouse
publish
predicted
calculated
##val
variants
##gne
strain
##ui
winston
deceased
##nus
touchdowns
brady
caleb
sinking
echoed
crush
hon
blessed
protagonist
hayes
endangered
magnitude
editors
##tine
estimate
responsibilities
##mel
backup
laying
consumed
sealed
zurich
lovers
frustrated
##eau
ahmed
kicking
mit
treasurer
1832
biblical
refuse
terrified
pump
agrees
genuine
imprisonment
refuses
plymouth
##hen
lou
##nen
tara
trembling
antarctic
ton
learns
##tas
crap
crucial
faction
atop
##borough
wrap
lancaster
odds
hopkins
erik
lyon
##eon
bros
##ode
snap
locality
tips
empress
crowned
cal
acclaimed
chuckled
##ory
clara
sends
mild
towel
##fl
##day
##а
wishing
assuming
interviewed
##bal
##die
interactions
eden
cups
helena
##lf
indie
beck
##fire
batteries
filipino
wizard
parted
##lam
traces
##born
rows
idol
albany
delegates
##ees
##sar
discussions
##ex
notre
instructed
belgrade
highways
suggestion
lauren
possess
orientation
alexandria
abdul
beats
salary
reunion
ludwig
alright
wagner
intimate
pockets
slovenia
hugged
brighton
merchants
cruel
stole
trek
slopes
repairs
enrollment
politically
underlying
promotional
counting
boeing
##bb
isabella
naming
##и
keen
bacteria
listing
separately
belfast
ussr
450
lithuanian
anybody
ribs
sphere
martinez
cock
embarrassed
proposals
fragments
nationals
##fs
##wski
premises
fin
1500
alpine
matched
freely
bounded
jace
sleeve
##af
gaming
pier
populated
evident
##like
frances
flooded
##dle
frightened
pour
trainer
framed
visitor
challenging
pig
wickets
##fold
infected
email
##pes
arose
##aw
reward
ecuador
oblast
vale
ch
shuttle
##usa
bach
rankings
forbidden
cornwall
accordance
salem
consumers
bruno
fantastic
toes
machinery
resolved
julius
remembering
propaganda
iceland
bombardment
tide
contacts
wives
##rah
concerto
macdonald
albania
implement
daisy
tapped
sudan
helmet
angela
mistress
##lic
crop
sunk
finest
##craft
hostile
##ute
##tsu
boxer
fr
paths
adjusted
habit
ballot
supervision
soprano
##zen
bullets
wicked
sunset
regiments
disappear
lamp
performs
app
##gia
##oa
rabbit
digging
incidents
entries
##cion
dishes
##oi
introducing
##ati
##fied
freshman
slot
jill
tackles
baroque
backs
##iest
lone
sponsor
destiny
altogether
convert
##aro
consensus
shapes
demonstration
basically
feminist
auction
artifacts
##bing
strongest
twitter
halifax
2019
allmusic
mighty
smallest
precise
alexandra
viola
##los
##ille
manuscripts
##illo
dancers
ari
managers
monuments
blades
barracks
springfield
maiden
consolidated
electron
##end
berry
airing
wheat
nobel
inclusion
blair
payments
geography
bee
cc
eleanor
react
##hurst
afc
manitoba
##yu
su
lineup
fitness
recreational
investments
airborne
disappointment
##dis
edmonton
viewing
##row
renovation
##cast
infant
bankruptcy
roses
aftermath
pavilion
##yer
carpenter
withdrawal
ladder
##hy
discussing
popped
reliable
agreements
rochester
##abad
curves
bombers
220
rao
reverend
decreased
choosing
107
stiff
consulting
naples
crawford
tracy
ka
ribbon
cops
##lee
crushed
deciding
unified
teenager
accepting
flagship
explorer
poles
sanchez
inspection
revived
skilled
induced
exchanged
flee
locals
tragedy
swallow
loading
hanna
demonstrate
##ela
salvador
flown
contestants
civilization
##ines
wanna
rhodes
fletcher
hector
knocking
considers
##ough
nash
mechanisms
sensed
mentally
walt
unclear
##eus
renovated
madame
##cks
crews
governmental
##hin
undertaken
monkey
##ben
##ato
fatal
armored
copa
caves
governance
grasp
perception
certification
froze
damp
tugged
wyoming
##rg
##ero
newman
##lor
nerves
curiosity
graph
115
##ami
withdraw
tunnels
dull
meredith
moss
exhibits
neighbors
communicate
accuracy
explored
raiders
republicans
secular
kat
superman
penny
criticised
##tch
freed
update
conviction
wade
ham
likewise
delegation
gotta
doll
promises
technological
myth
nationality
resolve
convent
##mark
sharon
dig
sip
coordinator
entrepreneur
fold
##dine
capability
councillor
synonym
blown
swan
cursed
1815
jonas
haired
sofa
canvas
keeper
rivalry
##hart
rapper
speedway
swords
postal
maxwell
estonia
potter
recurring
##nn
##ave
errors
##oni
cognitive
1834
##²
claws
nadu
roberto
bce
wrestler
ellie
##ations
infinite
ink
##tia
presumably
finite
staircase
108
noel
patricia
nacional
##cation
chill
eternal
tu
preventing
prussia
fossil
limbs
##logist
ernst
frog
perez
rene
##ace
pizza
prussian
##ios
##vy
molecules
regulatory
answering
opinions
sworn
lengths
supposedly
hypothesis
upward
habitats
seating
ancestors
drank
yield
hd
synthesis
researcher
modest
##var
mothers
peered
voluntary
homeland
##the
acclaim
##igan
static
valve
luxembourg
alto
carroll
fe
receptor
norton
ambulance
##tian
johnston
catholics
depicting
jointly
elephant
gloria
mentor
badge
ahmad
distinguish
remarked
councils
precisely
allison
advancing
detection
crowded
##10
cooperative
ankle
mercedes
dagger
surrendered
pollution
commit
subway
jeffrey
lesson
sculptures
provider
##fication
membrane
timothy
rectangular
fiscal
heating
teammate
basket
particle
anonymous
deployment
##ple
missiles
courthouse
proportion
shoe
sec
##ller
complaints
forbes
blacks
abandon
remind
sizes
overwhelming
autobiography
natalie
##awa
risks
contestant
countryside
babies
scorer
invaded
enclosed
proceed
hurling
disorders
##cu
reflecting
continuously
cruiser
graduates
freeway
investigated
ore
deserved
maid
blocking
phillip
jorge
shakes
dove
mann
variables
lacked
burden
accompanying
que
consistently
organizing
provisional
complained
endless
##rm
tubes
juice
georges
krishna
mick
labels
thriller
##uch
laps
arcade
sage
snail
##table
shannon
fi
laurence
seoul
vacation
presenting
hire
churchill
surprisingly
prohibited
savannah
technically
##oli
170
##lessly
testimony
suited
speeds
toys
romans
mlb
flowering
measurement
talented
kay
settings
charleston
expectations
shattered
achieving
triumph
ceremonies
portsmouth
lanes
mandatory
loser
stretching
cologne
realizes
seventy
cornell
careers
webb
##ulating
americas
budapest
ava
suspicion
##ison
yo
conrad
##hai
sterling
jessie
rector
##az
1831
transform
organize
loans
christine
volcanic
warrant
slender
summers
subfamily
newer
danced
dynamics
rhine
proceeds
heinrich
gastropod
commands
sings
facilitate
easter
ra
positioned
responses
expense
fruits
yanked
imported
25th
velvet
vic
primitive
tribune
baldwin
neighbourhood
donna
rip
hay
pr
##uro
1814
espn
welcomed
##aria
qualifier
glare
highland
timing
##cted
shells
eased
geometry
louder
exciting
slovakia
##sion
##iz
##lot
savings
prairie
##ques
marching
rafael
tonnes
##lled
curtain
preceding
shy
heal
greene
worthy
##pot
detachment
bury
sherman
##eck
reinforced
seeks
bottles
contracted
duchess
outfit
walsh
##sc
mickey
##ase
geoffrey
archer
squeeze
dawson
eliminate
invention
##enberg
neal
##eth
stance
dealer
coral
maple
retire
polo
simplified
##ht
1833
hid
watts
backwards
jules
##oke
genesis
mt
frames
rebounds
burma
woodland
moist
santos
whispers
drained
subspecies
##aa
streaming
ulster
burnt
correspondence
maternal
gerard
denis
stealing
##load
genius
duchy
##oria
inaugurated
momentum
suits
placement
sovereign
clause
thames
##hara
confederation
reservation
sketch
yankees
lets
rotten
charm
hal
verses
ultra
commercially
dot
salon
citation
adopt
winnipeg
mist
allocated
cairo
##boy
jenkins
interference
objectives
##wind
1820
portfolio
armoured
sectors
##eh
initiatives
##world
integrity
exercises
robe
tap
ab
gazed
##tones
distracted
rulers
111
favorable
jerome
tended
cart
factories
##eri
diplomat
valued
gravel
charitable
##try
calvin
exploring
chang
shepherd
terrace
pdf
pupil
##ural
reflects
ups
##rch
governors
shelf
depths
##nberg
trailed
crest
tackle
##nian
##ats
hatred
##kai
clare
makers
ethiopia
longtime
detected
embedded
lacking
slapped
rely
thomson
anticipation
iso
morton
successive
agnes
screenwriter
straightened
philippe
playwright
haunted
licence
iris
intentions
sutton
112
logical
correctly
##weight
branded
licked
tipped
silva
ricky
narrator
requests
##ents
greeted
supernatural
cow
##wald
lung
refusing
employer
strait
gaelic
liner
##piece
zoe
sabha
##mba
driveway
harvest
prints
bates
reluctantly
threshold
algebra
ira
wherever
coupled
240
assumption
picks
##air
designers
raids
gentlemen
##ean
roller
blowing
leipzig
locks
screw
dressing
strand
##lings
scar
dwarf
depicts
##nu
nods
##mine
differ
boris
##eur
yuan
flip
##gie
mob
invested
questioning
applying
##ture
shout
##sel
gameplay
blamed
illustrations
bothered
weakness
rehabilitation
##of
##zes
envelope
rumors
miners
leicester
subtle
kerry
##ico
ferguson
##fu
premiership
ne
##cat
bengali
prof
catches
remnants
dana
##rily
shouting
presidents
baltic
ought
ghosts
dances
sailors
shirley
fancy
dominic
##bie
madonna
##rick
bark
buttons
gymnasium
ashes
liver
toby
oath
providence
doyle
evangelical
nixon
cement
carnegie
embarked
hatch
surroundings
guarantee
needing
pirate
essence
##bee
filter
crane
hammond
projected
immune
percy
twelfth
##ult
regent
doctoral
damon
mikhail
##ichi
lu
critically
elect
realised
abortion
acute
screening
mythology
steadily
##fc
frown
nottingham
kirk
wa
minneapolis
##rra
module
algeria
mc
nautical
encounters
surprising
statues
availability
shirts
pie
alma
brows
munster
mack
soup
crater
tornado
sanskrit
cedar
explosive
bordered
dixon
planets
stamp
exam
happily
##bble
carriers
kidnapped
##vis
accommodation
emigrated
##met
knockout
correspondent
violation
profits
peaks
lang
specimen
agenda
ancestry
pottery
spelling
equations
obtaining
ki
linking
1825
debris
asylum
##20
buddhism
teddy
##ants
gazette
##nger
##sse
dental
eligibility
utc
fathers
averaged
zimbabwe
francesco
coloured
hissed
translator
lynch
mandate
humanities
mackenzie
uniforms
lin
##iana
##gio
asset
mhz
fitting
samantha
genera
wei
rim
beloved
shark
riot
entities
expressions
indo
carmen
slipping
owing
abbot
neighbor
sidney
##av
rats
recommendations
encouraging
squadrons
anticipated
commanders
conquered
##oto
donations
diagnosed
##mond
divide
##iva
guessed
decoration
vernon
auditorium
revelation
conversations
##kers
##power
herzegovina
dash
alike
protested
lateral
herman
accredited
mg
##gent
freeman
mel
fiji
crow
crimson
##rine
livestock
##pped
humanitarian
bored
oz
whip
##lene
##ali
legitimate
alter
grinning
spelled
anxious
oriental
wesley
##nin
##hole
carnival
controller
detect
##ssa
bowed
educator
kosovo
macedonia
##sin
occupy
mastering
stephanie
janeiro
para
unaware
nurses
noon
135
cam
hopefully
ranger
combine
sociology
polar
rica
##eer
neill
##sman
holocaust
##ip
doubled
lust
1828
109
decent
cooling
unveiled
##card
1829
nsw
homer
chapman
meyer
##gin
dive
mae
reagan
expertise
##gled
darwin
brooke
sided
prosecution
investigating
comprised
petroleum
genres
reluctant
differently
trilogy
johns
vegetables
corpse
highlighted
lounge
pension
unsuccessfully
elegant
aided
ivory
beatles
amelia
cain
dubai
sunny
immigrant
babe
click
##nder
underwater
pepper
combining
mumbled
atlas
horns
accessed
ballad
physicians
homeless
gestured
rpm
freak
louisville
corporations
patriots
prizes
rational
warn
modes
decorative
overnight
din
troubled
phantom
##ort
monarch
sheer
##dorf
generals
guidelines
organs
addresses
##zon
enhance
curling
parishes
cord
##kie
linux
caesar
deutsche
bavaria
##bia
coleman
cyclone
##eria
bacon
petty
##yama
##old
hampton
diagnosis
1824
throws
complexity
rita
disputed
##₃
pablo
##sch
marketed
trafficking
##ulus
examine
plague
formats
##oh
vault
faithful
##bourne
webster
##ox
highlights
##ient
##ann
phones
vacuum
sandwich
modeling
##gated
bolivia
clergy
qualities
isabel
##nas
##ars
wears
screams
reunited
annoyed
bra
##ancy
##rate
differential
transmitter
tattoo
container
poker
##och
excessive
resides
cowboys
##tum
augustus
trash
providers
statute
retreated
balcony
reversed
void
storey
preceded
masses
leap
laughs
neighborhoods
wards
schemes
falcon
santo
battlefield
pad
ronnie
thread
lesbian
venus
##dian
beg
sandstone
daylight
punched
gwen
analog
stroked
wwe
acceptable
measurements
dec
toxic
##kel
adequate
surgical
economist
parameters
varsity
##sberg
quantity
ella
##chy
##rton
countess
generating
precision
diamonds
expressway
ga
##ı
1821
uruguay
talents
galleries
expenses
scanned
colleague
outlets
ryder
lucien
##ila
paramount
##bon
syracuse
dim
fangs
gown
sweep
##sie
toyota
missionaries
websites
##nsis
sentences
adviser
val
trademark
spells
##plane
patience
starter
slim
##borg
toe
incredibly
shoots
elliot
nobility
##wyn
cowboy
endorsed
gardner
tendency
persuaded
organisms
emissions
kazakhstan
amused
boring
chips
themed
##hand
llc
constantinople
chasing
systematic
guatemala
borrowed
erin
carey
##hard
highlands
struggles
1810
##ifying
##ced
wong
exceptions
develops
enlarged
kindergarten
castro
##ern
##rina
leigh
zombie
juvenile
##most
consul
##nar
sailor
hyde
clarence
intensive
pinned
nasty
useless
jung
clayton
stuffed
exceptional
ix
apostolic
230
transactions
##dge
exempt
swinging
cove
religions
##ash
shields
dairy
bypass
190
pursuing
bug
joyce
bombay
chassis
southampton
chat
interact
redesignated
##pen
nascar
pray
salmon
rigid
regained
malaysian
grim
publicity
constituted
capturing
toilet
delegate
purely
tray
drift
loosely
striker
weakened
trinidad
mitch
itv
defines
transmitted
ming
scarlet
nodding
fitzgerald
fu
narrowly
sp
tooth
standings
virtue
##₁
##wara
##cting
chateau
gloves
lid
##nel
hurting
conservatory
##pel
sinclair
reopened
sympathy
nigerian
strode
advocated
optional
chronic
discharge
##rc
suck
compatible
laurel
stella
shi
fails
wage
dodge
128
informal
sorts
levi
buddha
villagers
##aka
chronicles
heavier
summoned
gateway
3000
eleventh
jewelry
translations
accordingly
seas
##ency
fiber
pyramid
cubic
dragging
##ista
caring
##ops
android
contacted
lunar
##dt
kai
lisbon
patted
1826
sacramento
theft
madagascar
subtropical
disputes
ta
holidays
piper
willow
mare
cane
itunes
newfoundland
benny
companions
dong
raj
observe
roar
charming
plaque
tibetan
fossils
enacted
manning
bubble
tina
tanzania
##eda
##hir
funk
swamp
deputies
cloak
ufc
scenario
par
scratch
metals
anthem
guru
engaging
specially
##boat
dialects
nineteen
cecil
duet
disability
messenger
unofficial
##lies
defunct
eds
moonlight
drainage
surname
puzzle
honda
switching
conservatives
mammals
knox
broadcaster
sidewalk
cope
##ried
benson
princes
peterson
##sal
bedford
sharks
eli
wreck
alberto
gasp
archaeology
lgbt
teaches
securities
madness
compromise
waving
coordination
davidson
visions
leased
possibilities
eighty
jun
fernandez
enthusiasm
assassin
sponsorship
reviewer
kingdoms
estonian
laboratories
##fy
##nal
applies
verb
celebrations
##zzo
rowing
lightweight
sadness
submit
mvp
balanced
dude
##vas
explicitly
metric
magnificent
mound
brett
mohammad
mistakes
irregular
##hing
##ass
sanders
betrayed
shipped
surge
##enburg
reporters
termed
georg
pity
verbal
bulls
abbreviated
enabling
appealed
##are
##atic
sicily
sting
heel
sweetheart
bart
spacecraft
brutal
monarchy
##tter
aberdeen
cameo
diane
##ub
survivor
clyde
##aries
complaint
##makers
clarinet
delicious
chilean
karnataka
coordinates
1818
panties
##rst
pretending
ar
dramatically
kiev
bella
tends
distances
113
catalog
launching
instances
telecommunications
portable
lindsay
vatican
##eim
angles
aliens
marker
stint
screens
bolton
##rne
judy
wool
benedict
plasma
europa
spark
imaging
filmmaker
swiftly
##een
contributor
##nor
opted
stamps
apologize
financing
butter
gideon
sophisticated
alignment
avery
chemicals
yearly
speculation
prominence
professionally
##ils
immortal
institutional
inception
wrists
identifying
tribunal
derives
gains
##wo
papal
preference
linguistic
vince
operative
brewery
##ont
unemployment
boyd
##ured
##outs
albeit
prophet
1813
bi
##rr
##face
##rad
quarterly
asteroid
cleaned
radius
temper
##llen
telugu
jerk
viscount
menu
##ote
glimpse
##aya
yacht
hawaiian
baden
##rl
laptop
readily
##gu
monetary
offshore
scots
watches
##yang
##arian
upgrade
needle
xbox
lea
encyclopedia
flank
fingertips
##pus
delight
teachings
confirm
roth
beaches
midway
winters
##iah
teasing
daytime
beverly
gambling
bonnie
##backs
regulated
clement
hermann
tricks
knot
##shing
##uring
##vre
detached
ecological
owed
specialty
byron
inventor
bats
stays
screened
unesco
midland
trim
affection
##ander
##rry
jess
thoroughly
feedback
##uma
chennai
strained
heartbeat
wrapping
overtime
pleaded
##sworth
mon
leisure
oclc
##tate
##ele
feathers
angelo
thirds
nuts
surveys
clever
gill
commentator
##dos
darren
rides
gibraltar
##nc
##mu
dissolution
dedication
shin
meals
saddle
elvis
reds
chaired
taller
appreciation
functioning
niece
favored
advocacy
robbie
criminals
suffolk
yugoslav
passport
constable
congressman
hastings
vera
##rov
consecrated
sparks
ecclesiastical
confined
##ovich
muller
floyd
nora
1822
paved
1827
cumberland
ned
saga
spiral
##flow
appreciated
yi
collaborative
treating
similarities
feminine
finishes
##ib
jade
import
##nse
##hot
champagne
mice
securing
celebrities
helsinki
attributes
##gos
cousins
phases
ache
lucia
gandhi
submission
vicar
spear
shine
tasmania
biting
detention
constitute
tighter
seasonal
##gus
terrestrial
matthews
##oka
effectiveness
parody
philharmonic
##onic
1816
strangers
encoded
consortium
guaranteed
regards
shifts
tortured
collision
supervisor
inform
broader
insight
theaters
armour
emeritus
blink
incorporates
mapping
##50
##ein
handball
flexible
##nta
substantially
generous
thief
##own
carr
loses
1793
prose
ucla
romeo
generic
metallic
realization
damages
mk
commissioners
zach
default
##ther
helicopters
lengthy
stems
spa
partnered
spectators
rogue
indication
penalties
teresa
1801
sen
##tric
dalton
##wich
irving
photographic
##vey
dell
deaf
peters
excluded
unsure
##vable
patterson
crawled
##zio
resided
whipped
latvia
slower
ecole
pipes
employers
maharashtra
comparable
va
textile
pageant
##gel
alphabet
binary
irrigation
chartered
choked
antoine
offs
waking
supplement
##wen
quantities
demolition
regain
locate
urdu
folks
alt
114
##mc
scary
andreas
whites
##ava
classrooms
mw
aesthetic
publishes
valleys
guides
cubs
johannes
bryant
conventions
affecting
##itt
drain
awesome
isolation
prosecutor
ambitious
apology
captive
downs
atmospheric
lorenzo
aisle
beef
foul
##onia
kidding
composite
disturbed
illusion
natives
##ffer
emi
rockets
riverside
wartime
painters
adolf
melted
##ail
uncertainty
simulation
hawks
progressed
meantime
builder
spray
breach
unhappy
regina
russians
##urg
determining
##tation
tram
1806
##quin
aging
##12
1823
garion
rented
mister
diaz
terminated
clip
1817
depend
nervously
disco
owe
defenders
shiva
notorious
disbelief
shiny
worcester
##gation
##yr
trailing
undertook
islander
belarus
limitations
watershed
fuller
overlooking
utilized
raphael
1819
synthetic
breakdown
klein
##nate
moaned
memoir
lamb
practicing
##erly
cellular
arrows
exotic
##graphy
witches
117
charted
rey
hut
hierarchy
subdivision
freshwater
giuseppe
aloud
reyes
qatar
marty
sideways
utterly
sexually
jude
prayers
mccarthy
softball
blend
damien
##gging
##metric
wholly
erupted
lebanese
negro
revenues
tasted
comparative
teamed
transaction
labeled
maori
sovereignty
parkway
trauma
gran
malay
121
advancement
descendant
2020
buzz
salvation
inventory
symbolic
##making
antarctica
mps
##gas
##bro
mohammed
myanmar
holt
submarines
tones
##lman
locker
patriarch
bangkok
emerson
remarks
predators
kin
afghan
confession
norwich
rental
emerge
advantages
##zel
rca
##hold
shortened
storms
aidan
##matic
autonomy
compliance
##quet
dudley
atp
##osis
1803
motto
documentation
summary
professors
spectacular
christina
archdiocese
flashing
innocence
remake
##dell
psychic
reef
scare
employ
rs
sticks
meg
gus
leans
##ude
accompany
bergen
tomas
##iko
doom
wages
pools
##nch
##bes
breasts
scholarly
alison
outline
brittany
breakthrough
willis
realistic
##cut
##boro
competitor
##stan
pike
picnic
icon
designing
commercials
washing
villain
skiing
micro
costumes
auburn
halted
executives
##hat
logistics
cycles
vowel
applicable
barrett
exclaimed
eurovision
eternity
ramon
##umi
##lls
modifications
sweeping
disgust
##uck
torch
aviv
ensuring
rude
dusty
sonic
donovan
outskirts
cu
pathway
##band
##gun
##lines
disciplines
acids
cadet
paired
##40
sketches
##sive
marriages
##⁺
folding
peers
slovak
implies
admired
##beck
1880s
leopold
instinct
attained
weston
megan
horace
##ination
dorsal
ingredients
evolutionary
##its
complications
deity
lethal
brushing
levy
deserted
institutes
posthumously
delivering
telescope
coronation
motivated
rapids
luc
flicked
pays
volcano
tanner
weighed
##nica
crowds
frankie
gifted
addressing
granddaughter
winding
##rna
constantine
gomez
##front
landscapes
rudolf
anthropology
slate
werewolf
##lio
astronomy
circa
rouge
dreaming
sack
knelt
drowned
naomi
prolific
tracked
freezing
herb
##dium
agony
randall
twisting
wendy
deposit
touches
vein
wheeler
##bbled
##bor
batted
retaining
tire
presently
compare
specification
daemon
nigel
##grave
merry
recommendation
czechoslovakia
sandra
ng
roma
##sts
lambert
inheritance
sheikh
winchester
cries
examining
##yle
comeback
cuisine
nave
##iv
ko
retrieve
tomatoes
barker
polished
defining
irene
lantern
personalities
begging
tract
swore
1809
175
##gic
omaha
brotherhood
##rley
haiti
##ots
exeter
##ete
##zia
steele
dumb
pearson
210
surveyed
elisabeth
trends
##ef
fritz
##rf
premium
bugs
fraction
calmly
viking
##birds
tug
inserted
unusually
##ield
confronted
distress
crashing
brent
turks
resign
##olo
cambodia
gabe
sauce
##kal
evelyn
116
extant
clusters
quarry
teenagers
luna
##lers
##ister
affiliation
drill
##ashi
panthers
scenic
libya
anita
strengthen
inscriptions
##cated
lace
sued
judith
riots
##uted
mint
##eta
preparations
midst
dub
challenger
##vich
mock
cf
displaced
wicket
breaths
enables
schmidt
analyst
##lum
ag
highlight
automotive
axe
josef
newark
sufficiently
resembles
50th
##pal
flushed
mum
traits
##ante
commodore
incomplete
warming
titular
ceremonial
ethical
118
celebrating
eighteenth
cao
lima
medalist
mobility
strips
snakes
##city
miniature
zagreb
barton
escapes
umbrella
automated
doubted
differs
cooled
georgetown
dresden
cooked
fade
wyatt
rna
jacobs
carlton
abundant
stereo
boost
madras
inning
##hia
spur
ip
malayalam
begged
osaka
groan
escaping
charging
dose
vista
##aj
bud
papa
communists
advocates
edged
tri
##cent
resemble
peaking
necklace
fried
montenegro
saxony
goose
glances
stuttgart
curator
recruit
grocery
sympathetic
##tting
##fort
127
lotus
randolph
ancestor
##rand
succeeding
jupiter
1798
macedonian
##heads
hiking
1808
handing
fischer
##itive
garbage
node
##pies
prone
singular
papua
inclined
attractions
italia
pouring
motioned
grandma
garnered
jacksonville
corp
ego
ringing
aluminum
##hausen
ordering
##foot
drawer
traders
synagogue
##play
##kawa
resistant
wandering
fragile
fiona
teased
var
hardcore
soaked
jubilee
decisive
exposition
mercer
poster
valencia
hale
kuwait
1811
##ises
##wr
##eed
tavern
gamma
122
johan
##uer
airways
amino
gil
##ury
vocational
domains
torres
##sp
generator
folklore
outcomes
##keeper
canberra
shooter
fl
beams
confrontation
##lling
##gram
feb
aligned
forestry
pipeline
jax
motorway
conception
decay
##tos
coffin
##cott
stalin
1805
escorted
minded
##nam
sitcom
purchasing
twilight
veronica
additions
passive
tensions
straw
123
frequencies
1804
refugee
cultivation
##iate
christie
clary
bulletin
crept
disposal
##rich
##zong
processor
crescent
##rol
bmw
emphasized
whale
nazis
aurora
##eng
dwelling
hauled
sponsors
toledo
mega
ideology
theatres
tessa
cerambycidae
saves
turtle
cone
suspects
kara
rusty
yelling
greeks
mozart
shades
cocked
participant
##tro
shire
spit
freeze
necessity
##cos
inmates
nielsen
councillors
loaned
uncommon
omar
peasants
botanical
offspring
daniels
formations
jokes
1794
pioneers
sigma
licensing
##sus
wheelchair
polite
1807
liquor
pratt
trustee
##uta
forewings
balloon
##zz
kilometre
camping
explicit
casually
shawn
foolish
teammates
nm
hassan
carrie
judged
satisfy
vanessa
knives
selective
cnn
flowed
##lice
eclipse
stressed
eliza
mathematician
cease
cultivated
##roy
commissions
browns
##ania
destroyers
sheridan
meadow
##rius
minerals
##cial
downstream
clash
gram
memoirs
ventures
baha
seymour
archie
midlands
edith
fare
flynn
invite
canceled
tiles
stabbed
boulder
incorporate
amended
camden
facial
mollusk
unreleased
descriptions
yoga
grabs
550
raises
ramp
shiver
##rose
coined
pioneering
tunes
qing
warwick
tops
119
melanie
giles
##rous
wandered
##inal
annexed
nov
30th
unnamed
##ished
organizational
airplane
normandy
stoke
whistle
blessing
violations
chased
holders
shotgun
##ctic
outlet
reactor
##vik
tires
tearing
shores
fortified
mascot
constituencies
nc
columnist
productive
tibet
##rta
lineage
hooked
oct
tapes
judging
cody
##gger
hansen
kashmir
triggered
##eva
solved
cliffs
##tree
resisted
anatomy
protesters
transparent
implied
##iga
injection
mattress
excluding
##mbo
defenses
helpless
devotion
##elli
growl
liberals
weber
phenomena
atoms
plug
##iff
mortality
apprentice
howe
convincing
aaa
swimmer
barber
leone
promptly
sodium
def
nowadays
arise
##oning
gloucester
corrected
dignity
norm
erie
##ders
elders
evacuated
sylvia
compression
##yar
hartford
pose
backpack
reasoning
accepts
24th
wipe
millimetres
marcel
##oda
dodgers
albion
1790
overwhelmed
aerospace
oaks
1795
showcase
acknowledge
recovering
nolan
ashe
hurts
geology
fashioned
disappearance
farewell
swollen
shrug
marquis
wimbledon
124
rue
1792
commemorate
reduces
experiencing
inevitable
calcutta
intel
##court
murderer
sticking
fisheries
imagery
bloom
280
brake
##inus
gustav
hesitation
memorable
po
viral
beans
accidents
tunisia
antenna
spilled
consort
treatments
aye
perimeter
##gard
donation
hostage
migrated
banker
addiction
apex
lil
trout
##ously
conscience
##nova
rams
sands
genome
passionate
troubles
##lets
##set
amid
##ibility
##ret
higgins
exceed
vikings
##vie
payne
##zan
muscular
##ste
defendant
sucking
##wal
ibrahim
fuselage
claudia
vfl
europeans
snails
interval
##garh
preparatory
statewide
tasked
lacrosse
viktor
##lation
angola
##hra
flint
implications
employs
teens
patrons
stall
weekends
barriers
scrambled
nucleus
tehran
jenna
parsons
lifelong
robots
displacement
5000
##bles
precipitation
##gt
knuckles
clutched
1802
marrying
ecology
marx
accusations
declare
scars
kolkata
mat
meadows
bermuda
skeleton
finalists
vintage
crawl
coordinate
affects
subjected
orchestral
mistaken
##tc
mirrors
dipped
relied
260
arches
candle
##nick
incorporating
wildly
fond
basilica
owl
fringe
rituals
whispering
stirred
feud
tertiary
slick
goat
honorable
whereby
skip
ricardo
stripes
parachute
adjoining
submerged
synthesizer
##gren
intend
positively
ninety
phi
beaver
partition
fellows
alexis
prohibition
carlisle
bizarre
fraternity
##bre
doubts
icy
cbc
aquatic
sneak
sonny
combines
airports
crude
supervised
spatial
merge
alfonso
##bic
corrupt
scan
undergo
##ams
disabilities
colombian
comparing
dolphins
perkins
##lish
reprinted
unanimous
bounced
hairs
underworld
midwest
semester
bucket
paperback
miniseries
coventry
demise
##leigh
demonstrations
sensor
rotating
yan
##hler
arrange
soils
##idge
hyderabad
labs
##dr
brakes
grandchildren
##nde
negotiated
rover
ferrari
continuation
directorate
augusta
stevenson
counterpart
gore
##rda
nursery
rican
ave
collectively
broadly
pastoral
repertoire
asserted
discovering
nordic
styled
fiba
cunningham
harley
middlesex
survives
tumor
tempo
zack
aiming
lok
urgent
##rade
##nto
devils
##ement
contractor
turin
##wl
##ool
bliss
repaired
simmons
moan
astronomical
cr
negotiate
lyric
1890s
lara
bred
clad
angus
pbs
##ience
engineered
posed
##lk
hernandez
possessions
elbows
psychiatric
strokes
confluence
electorate
lifts
campuses
lava
alps
##ep
##ution
##date
physicist
woody
##page
##ographic
##itis
juliet
reformation
sparhawk
320
complement
suppressed
jewel
##½
floated
##kas
continuity
sadly
##ische
inability
melting
scanning
paula
flour
judaism
safer
vague
##lm
solving
curb
##stown
financially
gable
bees
expired
miserable
cassidy
dominion
1789
cupped
145
robbery
facto
amos
warden
resume
tallest
marvin
ing
pounded
usd
declaring
gasoline
##aux
darkened
270
650
sophomore
##mere
erection
gossip
televised
risen
dial
##eu
pillars
##link
passages
profound
##tina
arabian
ashton
silicon
nail
##ead
##lated
##wer
##hardt
fleming
firearms
ducked
circuits
blows
waterloo
titans
##lina
atom
fireplace
cheshire
financed
activation
algorithms
##zzi
constituent
catcher
cherokee
partnerships
sexuality
platoon
tragic
vivian
guarded
whiskey
meditation
poetic
##late
##nga
##ake
porto
listeners
dominance
kendra
mona
chandler
factions
22nd
salisbury
attitudes
derivative
##ido
##haus
intake
paced
javier
illustrator
barrels
bias
cockpit
burnett
dreamed
ensuing
##anda
receptors
someday
hawkins
mattered
##lal
slavic
1799
jesuit
cameroon
wasted
tai
wax
lowering
victorious
freaking
outright
hancock
librarian
sensing
bald
calcium
myers
tablet
announcing
barack
shipyard
pharmaceutical
##uan
greenwich
flush
medley
patches
wolfgang
pt
speeches
acquiring
exams
nikolai
##gg
hayden
kannada
##type
reilly
##pt
waitress
abdomen
devastated
capped
pseudonym
pharmacy
fulfill
paraguay
1796
clicked
##trom
archipelago
syndicated
##hman
lumber
orgasm
rejection
clifford
lorraine
advent
mafia
rodney
brock
##ght
##used
##elia
cassette
chamberlain
despair
mongolia
sensors
developmental
upstream
##eg
##alis
spanning
165
trombone
basque
seeded
interred
renewable
rhys
leapt
revision
molecule
##ages
chord
vicious
nord
shivered
23rd
arlington
debts
corpus
sunrise
bays
blackburn
centimetres
##uded
shuddered
gm
strangely
gripping
cartoons
isabelle
orbital
##ppa
seals
proving
##lton
refusal
strengthened
bust
assisting
baghdad
batsman
portrayal
mara
pushes
spears
og
##cock
reside
nathaniel
brennan
1776
confirmation
caucus
##worthy
markings
yemen
nobles
ku
lazy
viewer
catalan
encompasses
sawyer
##fall
sparked
substances
patents
braves
arranger
evacuation
sergio
persuade
dover
tolerance
penguin
cum
jockey
insufficient
townships
occupying
declining
plural
processed
projection
puppet
flanders
introduces
liability
##yon
gymnastics
antwerp
taipei
hobart
candles
jeep
wes
observers
126
chaplain
bundle
glorious
##hine
hazel
flung
sol
excavations
dumped
stares
sh
bangalore
triangular
icelandic
intervals
expressing
turbine
##vers
songwriting
crafts
##igo
jasmine
ditch
rite
##ways
entertaining
comply
sorrow
wrestlers
basel
emirates
marian
rivera
helpful
##some
caution
downward
networking
##atory
##tered
darted
genocide
emergence
replies
specializing
spokesman
convenient
unlocked
fading
augustine
concentrations
resemblance
elijah
investigator
andhra
##uda
promotes
bean
##rrell
fleeing
wan
simone
announcer
##ame
##bby
lydia
weaver
132
residency
modification
##fest
stretches
##ast
alternatively
nat
lowe
lacks
##ented
pam
tile
concealed
inferior
abdullah
residences
tissues
vengeance
##ided
moisture
peculiar
groove
zip
bologna
jennings
ninja
oversaw
zombies
pumping
batch
livingston
emerald
installations
1797
peel
nitrogen
rama
##fying
##star
schooling
strands
responding
werner
##ost
lime
casa
accurately
targeting
##rod
underway
##uru
hemisphere
lester
##yard
occupies
2d
griffith
angrily
reorganized
##owing
courtney
deposited
##dd
##30
estadio
##ifies
dunn
exiled
##ying
checks
##combe
##о
##fly
successes
unexpectedly
blu
assessed
##flower
##ه
observing
sacked
spiders
kn
##tail
mu
nodes
prosperity
audrey
divisional
155
broncos
tangled
adjust
feeds
erosion
paolo
surf
directory
snatched
humid
admiralty
screwed
gt
reddish
##nese
modules
trench
lamps
bind
leah
bucks
competes
##nz
##form
transcription
##uc
isles
violently
clutching
pga
cyclist
inflation
flats
ragged
unnecessary
##hian
stubborn
coordinated
harriet
baba
disqualified
330
insect
wolfe
##fies
reinforcements
rocked
duel
winked
embraced
bricks
##raj
hiatus
defeats
pending
brightly
jealousy
##xton
##hm
##uki
lena
gdp
colorful
##dley
stein
kidney
##shu
underwear
wanderers
##haw
##icus
guardians
m³
roared
habits
##wise
permits
gp
uranium
punished
disguise
bundesliga
elise
dundee
erotic
partisan
pi
collectors
float
individually
rendering
behavioral
bucharest
ser
hare
valerie
corporal
nutrition
proportional
##isa
immense
##kis
pavement
##zie
##eld
sutherland
crouched
1775
##lp
suzuki
trades
endurance
operas
crosby
prayed
priory
rory
socially
##urn
gujarat
##pu
walton
cube
pasha
privilege
lennon
floods
thorne
waterfall
nipple
scouting
approve
##lov
minorities
voter
dwight
extensions
assure
ballroom
slap
dripping
privileges
rejoined
confessed
demonstrating
patriotic
yell
investor
##uth
pagan
slumped
squares
##cle
##kins
confront
bert
embarrassment
##aid
aston
urging
sweater
starr
yuri
brains
williamson
commuter
mortar
structured
selfish
exports
##jon
cds
##him
unfinished
##rre
mortgage
destinations
##nagar
canoe
solitary
buchanan
delays
magistrate
fk
##pling
motivation
##lier
##vier
recruiting
assess
##mouth
malik
antique
1791
pius
rahman
reich
tub
zhou
smashed
airs
galway
xii
conditioning
honduras
discharged
dexter
##pf
lionel
129
debates
lemon
tiffany
volunteered
dom
dioxide
procession
devi
sic
tremendous
advertisements
colts
transferring
verdict
hanover
decommissioned
utter
relate
pac
racism
##top
beacon
limp
similarity
terra
occurrence
ant
##how
becky
capt
updates
armament
richie
pal
##graph
halloween
mayo
##ssen
##bone
cara
serena
fcc
dolls
obligations
##dling
violated
lafayette
jakarta
exploitation
##ime
infamous
iconic
##lah
##park
kitty
moody
reginald
dread
spill
crystals
olivier
modeled
bluff
equilibrium
separating
notices
ordnance
extinction
onset
cosmic
attachment
sammy
expose
privy
anchored
##bil
abbott
admits
bending
baritone
emmanuel
policeman
vaughan
winged
climax
dresses
denny
polytechnic
mohamed
burmese
authentic
nikki
genetics
grandparents
homestead
gaza
postponed
metacritic
una
##sby
##bat
unstable
dissertation
##rial
##cian
curls
obscure
uncovered
bronx
praying
disappearing
##hoe
prehistoric
coke
turret
mutations
nonprofit
pits
monaco
##ي
##usion
prominently
dispatched
podium
##mir
uci
##uation
133
fortifications
birthplace
kendall
##lby
##oll
preacher
rack
goodman
##rman
persistent
##ott
countless
jaime
recorder
lexington
persecution
jumps
renewal
wagons
##11
crushing
##holder
decorations
##lake
abundance
wrath
laundry
£1
garde
##rp
jeanne
beetles
peasant
##sl
splitting
caste
sergei
##rer
##ema
scripts
##ively
rub
satellites
##vor
inscribed
verlag
scrapped
gale
packages
chick
potato
slogan
kathleen
arabs
##culture
counterparts
reminiscent
choral
##tead
rand
retains
bushes
dane
accomplish
courtesy
closes
##oth
slaughter
hague
krakow
lawson
tailed
elias
ginger
##ttes
canopy
betrayal
rebuilding
turf
##hof
frowning
allegiance
brigades
kicks
rebuild
polls
alias
nationalism
td
rowan
audition
bowie
fortunately
recognizes
harp
dillon
horrified
##oro
renault
##tics
ropes
##α
presumed
rewarded
infrared
wiping
accelerated
illustration
##rid
presses
practitioners
badminton
##iard
detained
##tera
recognizing
relates
misery
##sies
##tly
reproduction
piercing
potatoes
thornton
esther
manners
hbo
##aan
ours
bullshit
ernie
perennial
sensitivity
illuminated
rupert
##jin
##iss
##ear
rfc
nassau
##dock
staggered
socialism
##haven
appointments
nonsense
prestige
sharma
haul
##tical
solidarity
gps
##ook
##rata
igor
pedestrian
##uit
baxter
tenants
wires
medication
unlimited
guiding
impacts
diabetes
##rama
sasha
pas
clive
extraction
131
continually
constraints
##bilities
sonata
hunted
sixteenth
chu
planting
quote
mayer
pretended
abs
spat
##hua
ceramic
##cci
curtains
pigs
pitching
##dad
latvian
sore
dayton
##sted
##qi
patrols
slice
playground
##nted
shone
stool
apparatus
inadequate
mates
treason
##ija
desires
##liga
##croft
somalia
laurent
mir
leonardo
oracle
grape
obliged
chevrolet
thirteenth
stunning
enthusiastic
##ede
accounted
concludes
currents
basil
##kovic
drought
##rica
mai
##aire
shove
posting
##shed
pilgrimage
humorous
packing
fry
pencil
wines
smells
144
marilyn
aching
newest
clung
bon
neighbours
sanctioned
##pie
mug
##stock
drowning
##mma
hydraulic
##vil
hiring
reminder
lilly
investigators
##ncies
sour
##eous
compulsory
packet
##rion
##graphic
##elle
cannes
##inate
depressed
##rit
heroic
importantly
theresa
##tled
conway
saturn
marginal
rae
##xia
corresponds
royce
pact
jasper
explosives
packaging
aluminium
##ttered
denotes
rhythmic
spans
assignments
hereditary
outlined
originating
sundays
lad
reissued
greeting
beatrice
##dic
pillar
marcos
plots
handbook
alcoholic
judiciary
avant
slides
extract
masculine
blur
##eum
##force
homage
trembled
owens
hymn
trey
omega
signaling
socks
accumulated
reacted
attic
theo
lining
angie
distraction
primera
talbot
##key
1200
ti
creativity
billed
##hey
deacon
eduardo
identifies
proposition
dizzy
gunner
hogan
##yam
##pping
##hol
ja
##chan
jensen
reconstructed
##berger
clearance
darius
##nier
abe
harlem
plea
dei
circled
emotionally
notation
fascist
neville
exceeded
upwards
viable
ducks
##fo
workforce
racer
limiting
shri
##lson
possesses
1600
kerr
moths
devastating
laden
disturbing
locking
##cture
gal
fearing
accreditation
flavor
aide
1870s
mountainous
##baum
melt
##ures
motel
texture
servers
soda
##mb
herd
##nium
erect
puzzled
hum
peggy
examinations
gould
testified
geoff
ren
devised
sacks
##law
denial
posters
grunted
cesar
tutor
ec
gerry
offerings
byrne
falcons
combinations
ct
incoming
pardon
rocking
26th
avengers
flared
mankind
seller
uttar
loch
nadia
stroking
exposing
##hd
fertile
ancestral
instituted
##has
noises
prophecy
taxation
eminent
vivid
pol
##bol
dart
indirect
multimedia
notebook
upside
displaying
adrenaline
referenced
geometric
##iving
progression
##ddy
blunt
announce
##far
implementing
##lav
aggression
liaison
cooler
cares
headache
plantations
gorge
dots
impulse
thickness
ashamed
averaging
kathy
obligation
precursor
137
fowler
symmetry
thee
225
hears
##rai
undergoing
ads
butcher
bowler
##lip
cigarettes
subscription
goodness
##ically
browne
##hos
##tech
kyoto
donor
##erty
damaging
friction
drifting
expeditions
hardened
prostitution
152
fauna
blankets
claw
tossing
snarled
butterflies
recruits
investigative
coated
healed
138
communal
hai
xiii
academics
boone
psychologist
restless
lahore
stephens
mba
brendan
foreigners
printer
##pc
ached
explode
27th
deed
scratched
dared
##pole
cardiac
1780
okinawa
proto
commando
compelled
oddly
electrons
##base
replica
thanksgiving
##rist
sheila
deliberate
stafford
tidal
representations
hercules
ou
##path
##iated
kidnapping
lenses
##tling
deficit
samoa
mouths
consuming
computational
maze
granting
smirk
razor
fixture
ideals
inviting
aiden
nominal
##vs
issuing
julio
pitt
ramsey
docks
##oss
exhaust
##owed
bavarian
draped
anterior
mating
ethiopian
explores
noticing
##nton
discarded
convenience
hoffman
endowment
beasts
cartridge
mormon
paternal
probe
sleeves
interfere
lump
deadline
##rail
jenks
bulldogs
scrap
alternating
justified
reproductive
nam
seize
descending
secretariat
kirby
coupe
grouped
smash
panther
sedan
tapping
##18
lola
cheer
germanic
unfortunate
##eter
unrelated
##fan
subordinate
##sdale
suzanne
advertisement
##ility
horsepower
##lda
cautiously
discourse
luigi
##mans
##fields
noun
prevalent
mao
schneider
everett
surround
governorate
kira
##avia
westward
##take
misty
rails
sustainability
134
unused
##rating
packs
toast
unwilling
regulate
thy
suffrage
nile
awe
assam
definitions
travelers
affordable
##rb
conferred
sells
undefeated
beneficial
torso
basal
repeating
remixes
##pass
bahrain
cables
fang
##itated
excavated
numbering
statutory
##rey
deluxe
##lian
forested
ramirez
derbyshire
zeus
slamming
transfers
astronomer
banana
lottery
berg
histories
bamboo
##uchi
resurrection
posterior
bowls
vaguely
##thi
thou
preserving
tensed
offence
##inas
meyrick
callum
ridden
watt
langdon
tying
lowland
snorted
daring
truman
##hale
##girl
aura
overly
filing
weighing
goa
infections
philanthropist
saunders
eponymous
##owski
latitude
perspectives
reviewing
mets
commandant
radial
##kha
flashlight
reliability
koch
vowels
amazed
ada
elaine
supper
##rth
##encies
predator
debated
soviets
cola
##boards
##nah
compartment
crooked
arbitrary
fourteenth
##ctive
havana
majors
steelers
clips
profitable
ambush
exited
packers
##tile
nude
cracks
fungi
##е
limb
trousers
josie
shelby
tens
frederic
##ος
definite
smoothly
constellation
insult
baton
discs
lingering
##nco
conclusions
lent
staging
becker
grandpa
shaky
##tron
einstein
obstacles
sk
adverse
elle
economically
##moto
mccartney
thor
dismissal
motions
readings
nostrils
treatise
##pace
squeezing
evidently
prolonged
1783
venezuelan
je
marguerite
beirut
takeover
shareholders
##vent
denise
digit
airplay
norse
##bbling
imaginary
pills
hubert
blaze
vacated
eliminating
##ello
vine
mansfield
##tty
retrospective
barrow
borne
clutch
bail
forensic
weaving
##nett
##witz
desktop
citadel
promotions
worrying
dorset
ieee
subdivided
##iating
manned
expeditionary
pickup
synod
chuckle
185
barney
##rz
##ffin
functionality
karachi
litigation
meanings
uc
lick
turbo
anders
##ffed
execute
curl
oppose
ankles
typhoon
##د
##ache
##asia
linguistics
compassion
pressures
grazing
perfection
##iting
immunity
monopoly
muddy
backgrounds
136
namibia
francesca
monitors
attracting
stunt
tuition
##ии
vegetable
##mates
##quent
mgm
jen
complexes
forts
##ond
cellar
bites
seventeenth
royals
flemish
failures
mast
charities
##cular
peruvian
capitals
macmillan
ipswich
outward
frigate
postgraduate
folds
employing
##ouse
concurrently
fiery
##tai
contingent
nightmares
monumental
nicaragua
##kowski
lizard
mal
fielding
gig
reject
##pad
harding
##ipe
coastline
##cin
##nos
beethoven
humphrey
innovations
##tam
##nge
norris
doris
solicitor
huang
obey
141
##lc
niagara
##tton
shelves
aug
bourbon
curry
nightclub
specifications
hilton
##ndo
centennial
dispersed
worm
neglected
briggs
sm
font
kuala
uneasy
plc
##nstein
##bound
##aking
##burgh
awaiting
pronunciation
##bbed
##quest
eh
optimal
zhu
raped
greens
presided
brenda
worries
##life
venetian
marxist
turnout
##lius
refined
braced
sins
grasped
sunderland
nickel
speculated
lowell
cyrillic
communism
fundraising
resembling
colonists
mutant
freddie
usc
##mos
gratitude
##run
mural
##lous
chemist
wi
reminds
28th
steals
tess
pietro
##ingen
promoter
ri
microphone
honoured
rai
sant
##qui
feather
##nson
burlington
kurdish
terrorists
deborah
sickness
##wed
##eet
hazard
irritated
desperation
veil
clarity
##rik
jewels
xv
##gged
##ows
##cup
berkshire
unfair
mysteries
orchid
winced
exhaustion
renovations
stranded
obe
infinity
##nies
adapt
redevelopment
thanked
registry
olga
domingo
noir
tudor
ole
##atus
commenting
behaviors
##ais
crisp
pauline
probable
stirling
wigan
##bian
paralympics
panting
surpassed
##rew
luca
barred
pony
famed
##sters
cassandra
waiter
carolyn
exported
##orted
andres
destructive
deeds
jonah
castles
vacancy
suv
##glass
1788
orchard
yep
famine
belarusian
sprang
##forth
skinny
##mis
administrators
rotterdam
zambia
zhao
boiler
discoveries
##ride
##physics
lucius
disappointing
outreach
spoon
##frame
qualifications
unanimously
enjoys
regency
##iidae
stade
realism
veterinary
rodgers
dump
alain
chestnut
castile
censorship
rumble
gibbs
##itor
communion
reggae
inactivated
logs
loads
##houses
homosexual
##iano
ale
informs
##cas
phrases
plaster
linebacker
ambrose
kaiser
fascinated
850
limerick
recruitment
forge
mastered
##nding
leinster
rooted
threaten
##strom
borneo
##hes
suggestions
scholarships
propeller
documentaries
patronage
coats
constructing
invest
neurons
comet
entirety
shouts
identities
annoying
unchanged
wary
##antly
##ogy
neat
oversight
##kos
phillies
replay
constance
##kka
incarnation
humble
skies
minus
##acy
smithsonian
##chel
guerrilla
jar
cadets
##plate
surplus
audit
##aru
cracking
joanna
louisa
pacing
##lights
intentionally
##iri
diner
nwa
imprint
australians
tong
unprecedented
bunker
naive
specialists
ark
nichols
railing
leaked
pedal
##uka
shrub
longing
roofs
v8
captains
neural
tuned
##ntal
##jet
emission
medina
frantic
codex
definitive
sid
abolition
intensified
stocks
enrique
sustain
genoa
oxide
##written
clues
cha
##gers
tributaries
fragment
venom
##rity
##ente
##sca
muffled
vain
sire
laos
##ingly
##hana
hastily
snapping
surfaced
sentiment
motive
##oft
contests
approximate
mesa
luckily
dinosaur
exchanges
propelled
accord
bourne
relieve
tow
masks
offended
##ues
cynthia
##mmer
rains
bartender
zinc
reviewers
lois
##sai
legged
arrogant
rafe
rosie
comprise
handicap
blockade
inlet
lagoon
copied
drilling
shelley
petals
##inian
mandarin
obsolete
##inated
onward
arguably
productivity
cindy
praising
seldom
busch
discusses
raleigh
shortage
ranged
stanton
encouragement
firstly
conceded
overs
temporal
##uke
cbe
##bos
woo
certainty
pumps
##pton
stalked
##uli
lizzie
periodic
thieves
weaker
##night
gases
shoving
chooses
wc
##chemical
prompting
weights
##kill
robust
flanked
sticky
hu
tuberculosis
##eb
##eal
christchurch
resembled
wallet
reese
inappropriate
pictured
distract
fixing
fiddle
giggled
burger
heirs
hairy
mechanic
torque
apache
obsessed
chiefly
cheng
logging
##tag
extracted
meaningful
numb
##vsky
gloucestershire
reminding
##bay
unite
##lit
breeds
diminished
clown
glove
1860s
##ن
##ug
archibald
focal
freelance
sliced
depiction
##yk
organism
switches
sights
stray
crawling
##ril
lever
leningrad
interpretations
loops
anytime
reel
alicia
delighted
##ech
inhaled
xiv
suitcase
bernie
vega
licenses
northampton
exclusion
induction
monasteries
racecourse
homosexuality
##right
##sfield
##rky
dimitri
michele
alternatives
ions
commentators
genuinely
objected
pork
hospitality
fencing
stephan
warships
peripheral
wit
drunken
wrinkled
quentin
spends
departing
chung
numerical
spokesperson
##zone
johannesburg
caliber
killers
##udge
assumes
neatly
demographic
abigail
bloc
##vel
mounting
##lain
bentley
slightest
xu
recipients
##jk
merlin
##writer
seniors
prisons
blinking
hindwings
flickered
kappa
##hel
80s
strengthening
appealing
brewing
gypsy
mali
lashes
hulk
unpleasant
harassment
bio
treaties
predict
instrumentation
pulp
troupe
boiling
mantle
##ffe
ins
##vn
dividing
handles
verbs
##onal
coconut
senegal
340
thorough
gum
momentarily
##sto
cocaine
panicked
destined
##turing
teatro
denying
weary
captained
mans
##hawks
##code
wakefield
bollywood
thankfully
##16
cyril
##wu
amendments
##bahn
consultation
stud
reflections
kindness
1787
internally
##ovo
tex
mosaic
distribute
paddy
seeming
143
##hic
piers
##15
##mura
##verse
popularly
winger
kang
sentinel
mccoy
##anza
covenant
##bag
verge
fireworks
suppress
thrilled
dominate
##jar
swansea
##60
142
reconciliation
##ndi
stiffened
cue
dorian
##uf
damascus
amor
ida
foremost
##aga
porsche
unseen
dir
##had
##azi
stony
lexi
melodies
##nko
angular
integer
podcast
ants
inherent
jaws
justify
persona
##olved
josephine
##nr
##ressed
customary
flashes
gala
cyrus
glaring
backyard
ariel
physiology
greenland
html
stir
avon
atletico
finch
methodology
ked
##lent
mas
catholicism
townsend
branding
quincy
fits
containers
1777
ashore
aragon
##19
forearm
poisoning
##sd
adopting
conquer
grinding
amnesty
keller
finances
evaluate
forged
lankan
instincts
##uto
guam
bosnian
photographed
workplace
desirable
protector
##dog
allocation
intently
encourages
willy
##sten
bodyguard
electro
brighter
##ν
bihar
##chev
lasts
opener
amphibious
sal
verde
arte
##cope
captivity
vocabulary
yields
##tted
agreeing
desmond
pioneered
##chus
strap
campaigned
railroads
##ович
emblem
##dre
stormed
501
##ulous
marijuana
northumberland
##gn
##nath
bowen
landmarks
beaumont
##qua
danube
##bler
attorneys
th
ge
flyers
critique
villains
cass
mutation
acc
##0s
colombo
mckay
motif
sampling
concluding
syndicate
##rell
neon
stables
ds
warnings
clint
mourning
wilkinson
##tated
merrill
leopard
evenings
exhaled
emil
sonia
ezra
discrete
stove
farrell
fifteenth
prescribed
superhero
##rier
worms
helm
wren
##duction
##hc
expo
##rator
hq
unfamiliar
antony
prevents
acceleration
fiercely
mari
painfully
calculations
cheaper
ign
clifton
irvine
davenport
mozambique
##np
pierced
##evich
wonders
##wig
##cate
##iling
crusade
ware
##uel
enzymes
reasonably
mls
##coe
mater
ambition
bunny
eliot
kernel
##fin
asphalt
headmaster
torah
aden
lush
pins
waived
##care
##yas
joao
substrate
enforce
##grad
##ules
alvarez
selections
epidemic
tempted
##bit
bremen
translates
ensured
waterfront
29th
forrest
manny
malone
kramer
reigning
cookies
simpler
absorption
205
engraved
##ffy
evaluated
1778
haze
146
comforting
crossover
##abe
thorn
##rift
##imo
##pop
suppression
fatigue
cutter
##tr
201
wurttemberg
##orf
enforced
hovering
proprietary
gb
samurai
syllable
ascent
lacey
tick
lars
tractor
merchandise
rep
bouncing
defendants
##yre
huntington
##ground
##oko
standardized
##hor
##hima
assassinated
nu
predecessors
rainy
liar
assurance
lyrical
##uga
secondly
flattened
ios
parameter
undercover
##mity
bordeaux
punish
ridges
markers
exodus
inactive
hesitate
debbie
nyc
pledge
savoy
nagar
offset
organist
##tium
hesse
marin
converting
##iver
diagram
propulsion
pu
validity
reverted
supportive
##dc
ministries
clans
responds
proclamation
##inae
##ø
##rea
ein
pleading
patriot
sf
birch
islanders
strauss
hates
##dh
brandenburg
concession
rd
##ob
1900s
killings
textbook
antiquity
cinematography
wharf
embarrassing
setup
creed
farmland
inequality
centred
signatures
fallon
370
##ingham
##uts
ceylon
gazing
directive
laurie
##tern
globally
##uated
##dent
allah
excavation
threads
##cross
148
frantically
icc
utilize
determines
respiratory
thoughtful
receptions
##dicate
merging
chandra
seine
147
builders
builds
diagnostic
dev
visibility
goddamn
analyses
dhaka
cho
proves
chancel
concurrent
curiously
canadians
pumped
restoring
1850s
turtles
jaguar
sinister
spinal
traction
declan
vows
1784
glowed
capitalism
swirling
install
universidad
##lder
##oat
soloist
##genic
##oor
coincidence
beginnings
nissan
dip
resorts
caucasus
combustion
infectious
##eno
pigeon
serpent
##itating
conclude
masked
salad
jew
##gr
surreal
toni
##wc
harmonica
151
##gins
##etic
##coat
fishermen
intending
bravery
##wave
klaus
titan
wembley
taiwanese
ransom
40th
incorrect
hussein
eyelids
jp
cooke
dramas
utilities
##etta
##print
eisenhower
principally
granada
lana
##rak
openings
concord
##bl
bethany
connie
morality
sega
##mons
##nard
earnings
##kara
##cine
wii
communes
##rel
coma
composing
softened
severed
grapes
##17
nguyen
analyzed
warlord
hubbard
heavenly
behave
slovenian
##hit
##ony
hailed
filmmakers
trance
caldwell
skye
unrest
coward
likelihood
##aging
bern
sci
taliban
honolulu
propose
##wang
1700
browser
imagining
cobra
contributes
dukes
instinctively
conan
violinist
##ores
accessories
gradual
##amp
quotes
sioux
##dating
undertake
intercepted
sparkling
compressed
139
fungus
tombs
haley
imposing
rests
degradation
lincolnshire
retailers
wetlands
tulsa
distributor
dungeon
nun
greenhouse
convey
atlantis
aft
exits
oman
dresser
lyons
##sti
joking
eddy
judgement
omitted
digits
##cts
##game
juniors
##rae
cents
stricken
une
##ngo
wizards
weir
breton
nan
technician
fibers
liking
royalty
##cca
154
persia
terribly
magician
##rable
##unt
vance
cafeteria
booker
camille
warmer
##static
consume
cavern
gaps
compass
contemporaries
foyer
soothing
graveyard
maj
plunged
blush
##wear
cascade
demonstrates
ordinance
##nov
boyle
##lana
rockefeller
shaken
banjo
izzy
##ense
breathless
vines
##32
##eman
alterations
chromosome
dwellings
feudal
mole
153
catalonia
relics
tenant
mandated
##fm
fridge
hats
honesty
patented
raul
heap
cruisers
accusing
enlightenment
infants
wherein
chatham
contractors
zen
affinity
hc
osborne
piston
156
traps
maturity
##rana
lagos
##zal
peering
##nay
attendant
dealers
protocols
subset
prospects
biographical
##cre
artery
##zers
insignia
nuns
endured
##eration
recommend
schwartz
serbs
berger
cromwell
crossroads
##ctor
enduring
clasped
grounded
##bine
marseille
twitched
abel
choke
https
catalyst
moldova
italians
##tist
disastrous
wee
##oured
##nti
wwf
nope
##piration
##asa
expresses
thumbs
167
##nza
coca
1781
cheating
##ption
skipped
sensory
heidelberg
spies
satan
dangers
semifinal
202
bohemia
whitish
confusing
shipbuilding
relies
surgeons
landings
ravi
baku
moor
suffix
alejandro
##yana
litre
upheld
##unk
rajasthan
##rek
coaster
insists
posture
scenarios
etienne
favoured
appoint
transgender
elephants
poked
greenwood
defences
fulfilled
militant
somali
1758
chalk
potent
##ucci
migrants
wink
assistants
nos
restriction
activism
niger
##ario
colon
shaun
##sat
daphne
##erated
swam
congregations
reprise
considerations
magnet
playable
xvi
##р
overthrow
tobias
knob
chavez
coding
##mers
propped
katrina
orient
newcomer
##suke
temperate
##pool
farmhouse
interrogation
##vd
committing
##vert
forthcoming
strawberry
joaquin
macau
ponds
shocking
siberia
##cellular
chant
contributors
##nant
##ologists
sped
absorb
hail
1782
spared
##hore
barbados
karate
opus
originates
saul
##xie
evergreen
leaped
##rock
correlation
exaggerated
weekday
unification
bump
tracing
brig
afb
pathways
utilizing
##ners
mod
mb
disturbance
kneeling
##stad
##guchi
100th
pune
##thy
decreasing
168
manipulation
miriam
academia
ecosystem
occupational
rbi
##lem
rift
##14
rotary
stacked
incorporation
awakening
generators
guerrero
racist
##omy
cyber
derivatives
culminated
allie
annals
panzer
sainte
wikipedia
pops
zu
austro
##vate
algerian
politely
nicholson
mornings
educate
tastes
thrill
dartmouth
##gating
db
##jee
regan
differing
concentrating
choreography
divinity
##media
pledged
alexandre
routing
gregor
madeline
##idal
apocalypse
##hora
gunfire
culminating
elves
fined
liang
lam
programmed
tar
guessing
transparency
gabrielle
##gna
cancellation
flexibility
##lining
accession
shea
stronghold
nets
specializes
##rgan
abused
hasan
sgt
ling
exceeding
##₄
admiration
supermarket
##ark
photographers
specialised
tilt
resonance
hmm
perfume
380
sami
threatens
garland
botany
guarding
boiled
greet
puppy
russo
supplier
wilmington
vibrant
vijay
##bius
paralympic
grumbled
paige
faa
licking
margins
hurricanes
##gong
fest
grenade
ripping
##uz
counseling
weigh
##sian
needles
wiltshire
edison
costly
##not
fulton
tramway
redesigned
staffordshire
cache
gasping
watkins
sleepy
candidacy
##group
monkeys
timeline
throbbing
##bid
##sos
berth
uzbekistan
vanderbilt
bothering
overturned
ballots
gem
##iger
sunglasses
subscribers
hooker
compelling
ang
exceptionally
saloon
stab
##rdi
carla
terrifying
rom
##vision
coil
##oids
satisfying
vendors
31st
mackay
deities
overlooked
ambient
bahamas
felipe
olympia
whirled
botanist
advertised
tugging
##dden
disciples
morales
unionist
rites
foley
morse
motives
creepy
##₀
soo
##sz
bargain
highness
frightening
turnpike
tory
reorganization
##cer
depict
biographer
##walk
unopposed
manifesto
##gles
institut
emile
accidental
kapoor
##dam
kilkenny
cortex
lively
##13
romanesque
jain
shan
cannons
##ood
##ske
petrol
echoing
amalgamated
disappears
cautious
proposes
sanctions
trenton
##ر
flotilla
aus
contempt
tor
canary
cote
theirs
##hun
conceptual
deleted
fascinating
paso
blazing
elf
honourable
hutchinson
##eiro
##outh
##zin
surveyor
tee
amidst
wooded
reissue
intro
##ono
cobb
shelters
newsletter
hanson
brace
encoding
confiscated
dem
caravan
marino
scroll
melodic
cows
imam
##adi
##aneous
northward
searches
biodiversity
cora
310
roaring
##bers
connell
theologian
halo
compose
pathetic
unmarried
dynamo
##oot
az
calculation
toulouse
deserves
humour
nr
forgiveness
tam
undergone
martyr
pamela
myths
whore
counselor
hicks
290
heavens
battleship
electromagnetic
##bbs
stellar
establishments
presley
hopped
##chin
temptation
90s
wills
nas
##yuan
nhs
##nya
seminars
##yev
adaptations
gong
asher
lex
indicator
sikh
tobago
cites
goin
##yte
satirical
##gies
characterised
correspond
bubbles
lure
participates
##vid
eruption
skate
therapeutic
1785
canals
wholesale
defaulted
sac
460
petit
##zzled
virgil
leak
ravens
256
portraying
##yx
ghetto
creators
dams
portray
vicente
##rington
fae
namesake
bounty
##arium
joachim
##ota
##iser
aforementioned
axle
snout
depended
dismantled
reuben
480
##ibly
gallagher
##lau
##pd
earnest
##ieu
##iary
inflicted
objections
##llar
asa
gritted
##athy
jericho
##sea
##was
flick
underside
ceramics
undead
substituted
195
eastward
undoubtedly
wheeled
chimney
##iche
guinness
cb
##ager
siding
##bell
traitor
baptiste
disguised
inauguration
149
tipperary
choreographer
perched
warmed
stationary
eco
##ike
##ntes
bacterial
##aurus
flores
phosphate
##core
attacker
invaders
alvin
intersects
a1
indirectly
immigrated
businessmen
cornelius
valves
narrated
pill
sober
ul
nationale
monastic
applicants
scenery
##jack
161
motifs
constitutes
cpu
##osh
jurisdictions
sd
tuning
irritation
woven
##uddin
fertility
gao
##erie
antagonist
impatient
glacial
hides
boarded
denominations
interception
##jas
cookie
nicola
##tee
algebraic
marquess
bahn
parole
buyers
bait
turbines
paperwork
bestowed
natasha
renee
oceans
purchases
157
vaccine
215
##tock
fixtures
playhouse
integrate
jai
oswald
intellectuals
##cky
booked
nests
mortimer
##isi
obsession
sept
##gler
##sum
440
scrutiny
simultaneous
squinted
##shin
collects
oven
shankar
penned
remarkably
##я
slips
luggage
spectral
1786
collaborations
louie
consolidation
##ailed
##ivating
420
hoover
blackpool
harness
ignition
vest
tails
belmont
mongol
skinner
##nae
visually
mage
derry
##tism
##unce
stevie
transitional
##rdy
redskins
drying
prep
prospective
##21
annoyance
oversee
##loaded
fills
##books
##iki
announces
fda
scowled
respects
prasad
mystic
tucson
##vale
revue
springer
bankrupt
1772
aristotle
salvatore
habsburg
##geny
dal
natal
nut
pod
chewing
darts
moroccan
walkover
rosario
lenin
punjabi
##ße
grossed
scattering
wired
invasive
hui
polynomial
corridors
wakes
gina
portrays
##cratic
arid
retreating
erich
irwin
sniper
##dha
linen
lindsey
maneuver
butch
shutting
socio
bounce
commemorative
postseason
jeremiah
pines
275
mystical
beads
bp
abbas
furnace
bidding
consulted
assaulted
empirical
rubble
enclosure
sob
weakly
cancel
polly
yielded
##emann
curly
prediction
battered
70s
vhs
jacqueline
render
sails
barked
detailing
grayson
riga
sloane
raging
##yah
herbs
bravo
##athlon
alloy
giggle
imminent
suffers
assumptions
waltz
##itate
accomplishments
##ited
bathing
remixed
deception
prefix
##emia
deepest
##tier
##eis
balkan
frogs
##rong
slab
##pate
philosophers
peterborough
grains
imports
dickinson
rwanda
##atics
1774
dirk
lan
tablets
##rove
clone
##rice
caretaker
hostilities
mclean
##gre
regimental
treasures
norms
impose
tsar
tango
diplomacy
variously
complain
192
recognise
arrests
1779
celestial
pulitzer
##dus
bing
libretto
##moor
adele
splash
##rite
expectation
lds
confronts
##izer
spontaneous
harmful
wedge
entrepreneurs
buyer
##ope
bilingual
translate
rugged
conner
circulated
uae
eaton
##gra
##zzle
lingered
lockheed
vishnu
reelection
alonso
##oom
joints
yankee
headline
cooperate
heinz
laureate
invading
##sford
echoes
scandinavian
##dham
hugging
vitamin
salute
micah
hind
trader
##sper
radioactive
##ndra
militants
poisoned
ratified
remark
campeonato
deprived
wander
prop
##dong
outlook
##tani
##rix
##eye
chiang
darcy
##oping
mandolin
spice
statesman
babylon
182
walled
forgetting
afro
##cap
158
giorgio
buffer
##polis
planetary
##gis
overlap
terminals
kinda
centenary
##bir
arising
manipulate
elm
ke
1770
ak
##tad
chrysler
mapped
moose
pomeranian
quad
macarthur
assemblies
shoreline
recalls
stratford
##rted
noticeable
##evic
imp
##rita
##sque
accustomed
supplying
tents
disgusted
vogue
sipped
filters
khz
reno
selecting
luftwaffe
mcmahon
tyne
masterpiece
carriages
collided
dunes
exercised
flare
remembers
muzzle
##mobile
heck
##rson
burgess
lunged
middleton
boycott
bilateral
##sity
hazardous
lumpur
multiplayer
spotlight
jackets
goldman
liege
porcelain
rag
waterford
benz
attracts
hopeful
battling
ottomans
kensington
baked
hymns
cheyenne
lattice
levine
borrow
polymer
clashes
michaels
monitored
commitments
denounced
##25
##von
cavity
##oney
hobby
akin
##holders
futures
intricate
cornish
patty
##oned
illegally
dolphin
##lag
barlow
yellowish
maddie
apologized
luton
plagued
##puram
nana
##rds
sway
fanny
łodz
##rino
psi
suspicions
hanged
##eding
initiate
charlton
##por
nak
competent
235
analytical
annex
wardrobe
reservations
##rma
sect
162
fairfax
hedge
piled
buckingham
uneven
bauer
simplicity
snyder
interpret
accountability
donors
moderately
byrd
continents
##cite
##max
disciple
hr
jamaican
ping
nominees
##uss
mongolian
diver
attackers
eagerly
ideological
pillows
miracles
apartheid
revolver
sulfur
clinics
moran
163
##enko
ile
katy
rhetoric
##icated
chronology
recycling
##hrer
elongated
mughal
pascal
profiles
vibration
databases
domination
##fare
##rant
matthias
digest
rehearsal
polling
weiss
initiation
reeves
clinging
flourished
impress
ngo
##hoff
##ume
buckley
symposium
rhythms
weed
emphasize
transforming
##taking
##gence
##yman
accountant
analyze
flicker
foil
priesthood
voluntarily
decreases
##80
##hya
slater
sv
charting
mcgill
##lde
moreno
##iu
besieged
zur
robes
##phic
admitting
api
deported
turmoil
peyton
earthquakes
##ares
nationalists
beau
clair
brethren
interrupt
welch
curated
galerie
requesting
164
##ested
impending
steward
viper
##vina
complaining
beautifully
brandy
foam
nl
1660
##cake
alessandro
punches
laced
explanations
##lim
attribute
clit
reggie
discomfort
##cards
smoothed
whales
##cene
adler
countered
duffy
disciplinary
widening
recipe
reliance
conducts
goats
gradient
preaching
##shaw
matilda
quasi
striped
meridian
cannabis
cordoba
certificates
##agh
##tering
graffiti
hangs
pilgrims
repeats
##ych
revive
urine
etat
##hawk
fueled
belts
fuzzy
susceptible
##hang
mauritius
salle
sincere
beers
hooks
##cki
arbitration
entrusted
advise
sniffed
seminar
junk
donnell
processors
principality
strapped
celia
mendoza
everton
fortunes
prejudice
starving
reassigned
steamer
##lund
tuck
evenly
foreman
##ffen
dans
375
envisioned
slit
##xy
baseman
liberia
rosemary
##weed
electrified
periodically
potassium
stride
contexts
sperm
slade
mariners
influx
bianca
subcommittee
##rane
spilling
icao
estuary
##nock
delivers
iphone
##ulata
isa
mira
bohemian
dessert
##sbury
welcoming
proudly
slowing
##chs
musee
ascension
russ
##vian
waits
##psy
africans
exploit
##morphic
gov
eccentric
crab
peck
##ull
entrances
formidable
marketplace
groom
bolted
metabolism
patton
robbins
courier
payload
endure
##ifier
andes
refrigerator
##pr
ornate
##uca
ruthless
illegitimate
masonry
strasbourg
bikes
adobe
##³
apples
quintet
willingly
niche
bakery
corpses
energetic
##cliffe
##sser
##ards
177
centimeters
centro
fuscous
cretaceous
rancho
##yde
andrei
telecom
tottenham
oasis
ordination
vulnerability
presiding
corey
cp
penguins
sims
##pis
malawi
piss
##48
correction
##cked
##ffle
##ryn
countdown
detectives
psychiatrist
psychedelic
dinosaurs
blouse
##get
choi
vowed
##oz
randomly
##pol
49ers
scrub
blanche
bruins
dusseldorf
##using
unwanted
##ums
212
dominique
elevations
headlights
om
laguna
##oga
1750
famously
ignorance
shrewsbury
##aine
ajax
breuning
che
confederacy
greco
overhaul
##screen
paz
skirts
disagreement
cruelty
jagged
phoebe
shifter
hovered
viruses
##wes
mandy
##lined
##gc
landlord
squirrel
dashed
##ι
ornamental
gag
wally
grange
literal
spurs
undisclosed
proceeding
yin
##text
billie
orphan
spanned
humidity
indy
weighted
presentations
explosions
lucian
##tary
vaughn
hindus
##anga
##hell
psycho
171
daytona
protects
efficiently
rematch
sly
tandem
##oya
rebranded
impaired
hee
metropolis
peach
godfrey
diaspora
ethnicity
prosperous
gleaming
dar
grossing
playback
##rden
stripe
pistols
##tain
births
labelled
##cating
172
rudy
alba
##onne
aquarium
hostility
##gb
##tase
shudder
sumatra
hardest
lakers
consonant
creeping
demos
homicide
capsule
zeke
liberties
expulsion
pueblo
##comb
trait
transporting
##ddin
##neck
##yna
depart
gregg
mold
ledge
hangar
oldham
playboy
termination
analysts
gmbh
romero
##itic
insist
cradle
filthy
brightness
slash
shootout
deposed
bordering
##truct
isis
microwave
tumbled
sheltered
cathy
werewolves
messy
andersen
convex
clapped
clinched
satire
wasting
edo
vc
rufus
##jak
mont
##etti
poznan
##keeping
restructuring
transverse
##rland
azerbaijani
slovene
gestures
roommate
choking
shear
##quist
vanguard
oblivious
##hiro
disagreed
baptism
##lich
coliseum
##aceae
salvage
societe
cory
locke
relocation
relying
versailles
ahl
swelling
##elo
cheerful
##word
##edes
gin
sarajevo
obstacle
diverted
##nac
messed
thoroughbred
fluttered
utrecht
chewed
acquaintance
assassins
dispatch
mirza
##wart
nike
salzburg
swell
yen
##gee
idle
ligue
samson
##nds
##igh
playful
spawned
##cise
tease
##case
burgundy
##bot
stirring
skeptical
interceptions
marathi
##dies
bedrooms
aroused
pinch
##lik
preferences
tattoos
buster
digitally
projecting
rust
##ital
kitten
priorities
addison
pseudo
##guard
dusk
icons
sermon
##psis
##iba
bt
##lift
##xt
ju
truce
rink
##dah
##wy
defects
psychiatry
offences
calculate
glucose
##iful
##rized
##unda
francaise
##hari
richest
warwickshire
carly
1763
purity
redemption
lending
##cious
muse
bruises
cerebral
aero
carving
##name
preface
terminology
invade
monty
##int
anarchist
blurred
##iled
rossi
treats
guts
shu
foothills
ballads
undertaking
premise
cecilia
affiliates
blasted
conditional
wilder
minors
drone
rudolph
buffy
swallowing
horton
attested
##hop
rutherford
howell
primetime
livery
penal
##bis
minimize
hydro
wrecked
wrought
palazzo
##gling
cans
vernacular
friedman
nobleman
shale
walnut
danielle
##ection
##tley
sears
##kumar
chords
lend
flipping
streamed
por
dracula
gallons
sacrifices
gamble
orphanage
##iman
mckenzie
##gible
boxers
daly
##balls
##ان
208
##ific
##rative
##iq
exploited
slated
##uity
circling
hillary
pinched
goldberg
provost
campaigning
lim
piles
ironically
jong
mohan
successors
usaf
##tem
##ught
autobiographical
haute
preserves
##ending
acquitted
comparisons
203
hydroelectric
gangs
cypriot
torpedoes
rushes
chrome
derive
bumps
instability
fiat
pets
##mbe
silas
dye
reckless
settler
##itation
info
heats
##writing
176
canonical
maltese
fins
mushroom
stacy
aspen
avid
##kur
##loading
vickers
gaston
hillside
statutes
wilde
gail
kung
sabine
comfortably
motorcycles
##rgo
169
pneumonia
fetch
##sonic
axel
faintly
parallels
##oop
mclaren
spouse
compton
interdisciplinary
miner
##eni
181
clamped
##chal
##llah
separates
versa
##mler
scarborough
labrador
##lity
##osing
rutgers
hurdles
como
166
burt
divers
##100
wichita
cade
coincided
##erson
bruised
mla
##pper
vineyard
##ili
##brush
notch
mentioning
jase
hearted
kits
doe
##acle
pomerania
##ady
ronan
seizure
pavel
problematic
##zaki
domenico
##ulin
catering
penelope
dependence
parental
emilio
ministerial
atkinson
##bolic
clarkson
chargers
colby
grill
peeked
arises
summon
##aged
fools
##grapher
faculties
qaeda
##vial
garner
refurbished
##hwa
geelong
disasters
nudged
bs
shareholder
lori
algae
reinstated
rot
##ades
##nous
invites
stainless
183
inclusive
##itude
diocesan
til
##icz
denomination
##xa
benton
floral
registers
##ider
##erman
##kell
absurd
brunei
guangzhou
hitter
retaliation
##uled
##eve
blanc
nh
consistency
contamination
##eres
##rner
dire
palermo
broadcasters
diaries
inspire
vols
brewer
tightening
ky
mixtape
hormone
##tok
stokes
##color
##dly
##ssi
pg
##ometer
##lington
sanitation
##tility
intercontinental
apps
##adt
¹⁄₂
cylinders
economies
favourable
unison
croix
gertrude
odyssey
vanity
dangling
##logists
upgrades
dice
middleweight
practitioner
##ight
206
henrik
parlor
orion
angered
lac
python
blurted
##rri
sensual
intends
swings
angled
##phs
husky
attain
peerage
precinct
textiles
cheltenham
shuffled
dai
confess
tasting
bhutan
##riation
tyrone
segregation
abrupt
ruiz
##rish
smirked
blackwell
confidential
browning
amounted
##put
vase
scarce
fabulous
raided
staple
guyana
unemployed
glider
shay
##tow
carmine
troll
intervene
squash
superstar
##uce
cylindrical
len
roadway
researched
handy
##rium
##jana
meta
lao
declares
##rring
##tadt
##elin
##kova
willem
shrubs
napoleonic
realms
skater
qi
volkswagen
##ł
tad
hara
archaeologist
awkwardly
eerie
##kind
wiley
##heimer
##24
titus
organizers
cfl
crusaders
lama
usb
vent
enraged
thankful
occupants
maximilian
##gaard
possessing
textbooks
##oran
collaborator
quaker
##ulo
avalanche
mono
silky
straits
isaiah
mustang
surged
resolutions
potomac
descend
cl
kilograms
plato
strains
saturdays
##olin
bernstein
##ype
holstein
ponytail
##watch
belize
conversely
heroine
perpetual
##ylus
charcoal
piedmont
glee
negotiating
backdrop
prologue
##jah
##mmy
pasadena
climbs
ramos
sunni
##holm
##tner
##tri
anand
deficiency
hertfordshire
stout
##avi
aperture
orioles
##irs
doncaster
intrigued
bombed
coating
otis
##mat
cocktail
##jit
##eto
amir
arousal
sar
##proof
##act
##ories
dixie
pots
##bow
whereabouts
159
##fted
drains
bullying
cottages
scripture
coherent
fore
poe
appetite
##uration
sampled
##ators
##dp
derrick
rotor
jays
peacock
installment
##rro
advisors
##coming
rodeo
scotch
##mot
##db
##fen
##vant
ensued
rodrigo
dictatorship
martyrs
twenties
##н
towed
incidence
marta
rainforest
sai
scaled
##cles
oceanic
qualifiers
symphonic
mcbride
dislike
generalized
aubrey
colonization
##iation
##lion
##ssing
disliked
lublin
salesman
##ulates
spherical
whatsoever
sweating
avalon
contention
punt
severity
alderman
atari
##dina
##grant
##rop
scarf
seville
vertices
annexation
fairfield
fascination
inspiring
launches
palatinate
regretted
##rca
feral
##iom
elk
nap
olsen
reddy
yong
##leader
##iae
garment
transports
feng
gracie
outrage
viceroy
insides
##esis
breakup
grady
organizer
softer
grimaced
222
murals
galicia
arranging
vectors
##rsten
bas
##sb
##cens
sloan
##eka
bitten
ara
fender
nausea
bumped
kris
banquet
comrades
detector
persisted
##llan
adjustment
endowed
cinemas
##shot
sellers
##uman
peek
epa
kindly
neglect
simpsons
talon
mausoleum
runaway
hangul
lookout
##cic
rewards
coughed
acquainted
chloride
##ald
quicker
accordion
neolithic
##qa
artemis
coefficient
lenny
pandora
tx
##xed
ecstasy
litter
segunda
chairperson
gemma
hiss
rumor
vow
nasal
antioch
compensate
patiently
transformers
##eded
judo
morrow
penis
posthumous
philips
bandits
husbands
denote
flaming
##any
##phones
langley
yorker
1760
walters
##uo
##kle
gubernatorial
fatty
samsung
leroy
outlaw
##nine
unpublished
poole
jakob
##ᵢ
##ₙ
crete
distorted
superiority
##dhi
intercept
crust
mig
claus
crashes
positioning
188
stallion
301
frontal
armistice
##estinal
elton
aj
encompassing
camel
commemorated
malaria
woodward
calf
cigar
penetrate
##oso
willard
##rno
##uche
illustrate
amusing
convergence
noteworthy
##lma
##rva
journeys
realise
manfred
##sable
410
##vocation
hearings
fiance
##posed
educators
provoked
adjusting
##cturing
modular
stockton
paterson
vlad
rejects
electors
selena
maureen
##tres
uber
##rce
swirled
##num
proportions
nanny
pawn
naturalist
parma
apostles
awoke
ethel
wen
##bey
monsoon
overview
##inating
mccain
rendition
risky
adorned
##ih
equestrian
germain
nj
conspicuous
confirming
##yoshi
shivering
##imeter
milestone
rumours
flinched
bounds
smacked
token
##bei
lectured
automobiles
##shore
impacted
##iable
nouns
nero
##leaf
ismail
prostitute
trams
##lace
bridget
sud
stimulus
impressions
reins
revolves
##oud
##gned
giro
honeymoon
##swell
criterion
##sms
##uil
libyan
prefers
##osition
211
preview
sucks
accusation
bursts
metaphor
diffusion
tolerate
faye
betting
cinematographer
liturgical
specials
bitterly
humboldt
##ckle
flux
rattled
##itzer
archaeologists
odor
authorised
marshes
discretion
##ов
alarmed
archaic
inverse
##leton
explorers
##pine
drummond
tsunami
woodlands
##minate
##tland
booklet
insanity
owning
insert
crafted
calculus
##tore
receivers
##bt
stung
##eca
##nched
prevailing
travellers
eyeing
lila
graphs
##borne
178
julien
##won
morale
adaptive
therapist
erica
cw
libertarian
bowman
pitches
vita
##ional
crook
##ads
##entation
caledonia
mutiny
##sible
1840s
automation
##ß
flock
##pia
ironic
pathology
##imus
remarried
##22
joker
withstand
energies
##att
shropshire
hostages
madeleine
tentatively
conflicting
mateo
recipes
euros
ol
mercenaries
nico
##ndon
albuquerque
augmented
mythical
bel
freud
##child
cough
##lica
365
freddy
lillian
genetically
nuremberg
calder
209
bonn
outdoors
paste
suns
urgency
vin
restraint
tyson
##cera
##selle
barrage
bethlehem
kahn
##par
mounts
nippon
barony
happier
ryu
makeshift
sheldon
blushed
castillo
barking
listener
taped
bethel
fluent
headlines
pornography
rum
disclosure
sighing
mace
doubling
gunther
manly
##plex
rt
interventions
physiological
forwards
emerges
##tooth
##gny
compliment
rib
recession
visibly
barge
faults
connector
exquisite
prefect
##rlin
patio
##cured
elevators
brandt
italics
pena
173
wasp
satin
ea
botswana
graceful
respectable
##jima
##rter
##oic
franciscan
generates
##dl
alfredo
disgusting
##olate
##iously
sherwood
warns
cod
promo
cheryl
sino
##ة
##escu
twitch
##zhi
brownish
thom
ortiz
##dron
densely
##beat
carmel
reinforce
##bana
187
anastasia
downhill
vertex
contaminated
remembrance
harmonic
homework
##sol
fiancee
gears
olds
angelica
loft
ramsay
quiz
colliery
sevens
##cape
autism
##hil
walkway
##boats
ruben
abnormal
ounce
khmer
##bbe
zachary
bedside
morphology
punching
##olar
sparrow
convinces
##35
hewitt
queer
remastered
rods
mabel
solemn
notified
lyricist
symmetric
##xide
174
encore
passports
wildcats
##uni
baja
##pac
mildly
##ease
bleed
commodity
mounds
glossy
orchestras
##omo
damian
prelude
ambitions
##vet
awhile
remotely
##aud
asserts
imply
##iques
distinctly
modelling
remedy
##dded
windshield
dani
xiao
##endra
audible
powerplant
1300
invalid
elemental
acquisitions
##hala
immaculate
libby
plata
smuggling
ventilation
denoted
minh
##morphism
430
differed
dion
kelley
lore
mocking
sabbath
spikes
hygiene
drown
runoff
stylized
tally
liberated
aux
interpreter
righteous
aba
siren
reaper
pearce
millie
##cier
##yra
gaius
##iso
captures
##ttering
dorm
claudio
##sic
benches
knighted
blackness
##ored
discount
fumble
oxidation
routed
##ς
novak
perpendicular
spoiled
fracture
splits
##urt
pads
topology
##cats
axes
fortunate
offenders
protestants
esteem
221
broadband
convened
frankly
hound
prototypes
isil
facilitated
keel
##sher
sahara
awaited
bubba
orb
prosecutors
186
hem
520
##xing
relaxing
remnant
romney
sorted
slalom
stefano
ulrich
##active
exemption
folder
pauses
foliage
hitchcock
epithet
204
criticisms
##aca
ballistic
brody
hinduism
chaotic
youths
equals
##pala
pts
thicker
analogous
capitalist
improvised
overseeing
sinatra
ascended
beverage
##tl
straightforward
##kon
curran
##west
bois
325
induce
surveying
emperors
sax
unpopular
##kk
cartoonist
fused
##mble
unto
##yuki
localities
##cko
##ln
darlington
slain
academie
lobbying
sediment
puzzles
##grass
defiance
dickens
manifest
tongues
alumnus
arbor
coincide
184
appalachian
mustafa
examiner
cabaret
traumatic
yves
bracelet
draining
heroin
magnum
baths
odessa
consonants
mitsubishi
##gua
kellan
vaudeville
##fr
joked
null
straps
probation
##ław
ceded
interfaces
##pas
##zawa
blinding
viet
224
rothschild
museo
640
huddersfield
##vr
tactic
##storm
brackets
dazed
incorrectly
##vu
reg
glazed
fearful
manifold
benefited
irony
##sun
stumbling
##rte
willingness
balkans
mei
wraps
##aba
injected
##lea
gu
syed
harmless
##hammer
bray
takeoff
poppy
timor
cardboard
astronaut
purdue
weeping
southbound
cursing
stalls
diagonal
##neer
lamar
bryce
comte
weekdays
harrington
##uba
negatively
##see
lays
grouping
##cken
##henko
affirmed
halle
modernist
##lai
hodges
smelling
aristocratic
baptized
dismiss
justification
oilers
##now
coupling
qin
snack
healer
##qing
gardener
layla
battled
formulated
stephenson
gravitational
##gill
##jun
1768
granny
coordinating
suites
##cd
##ioned
monarchs
##cote
##hips
sep
blended
apr
barrister
deposition
fia
mina
policemen
paranoid
##pressed
churchyard
covert
crumpled
creep
abandoning
tr
transmit
conceal
barr
understands
readiness
spire
##cology
##enia
##erry
610
startling
unlock
vida
bowled
slots
##nat
##islav
spaced
trusting
admire
rig
##ink
slack
##70
mv
207
casualty
##wei
classmates
##odes
##rar
##rked
amherst
furnished
evolve
foundry
menace
mead
##lein
flu
wesleyan
##kled
monterey
webber
##vos
wil
##mith
##на
bartholomew
justices
restrained
##cke
amenities
191
mediated
sewage
trenches
ml
mainz
##thus
1800s
##cula
##inski
caine
bonding
213
converts
spheres
superseded
marianne
crypt
sweaty
ensign
historia
##br
spruce
##post
##ask
forks
thoughtfully
yukon
pamphlet
ames
##uter
karma
##yya
bryn
negotiation
sighs
incapable
##mbre
##ntial
actresses
taft
##mill
luce
prevailed
##amine
1773
motionless
envoy
testify
investing
sculpted
instructors
provence
kali
cullen
horseback
##while
goodwin
##jos
gaa
norte
##ldon
modify
wavelength
abd
214
skinned
sprinter
forecast
scheduling
marries
squared
tentative
##chman
boer
##isch
bolts
swap
fisherman
assyrian
impatiently
guthrie
martins
murdoch
194
tanya
nicely
dolly
lacy
med
##45
syn
decks
fashionable
millionaire
##ust
surfing
##ml
##ision
heaved
tammy
consulate
attendees
routinely
197
fuse
saxophonist
backseat
malaya
##lord
scowl
tau
##ishly
193
sighted
steaming
##rks
303
911
##holes
##hong
ching
##wife
bless
conserved
jurassic
stacey
unix
zion
chunk
rigorous
blaine
198
peabody
slayer
dismay
brewers
nz
##jer
det
##glia
glover
postwar
int
penetration
sylvester
imitation
vertically
airlift
heiress
knoxville
viva
##uin
390
macon
##rim
##fighter
##gonal
janice
##orescence
##wari
marius
belongings
leicestershire
196
blanco
inverted
preseason
sanity
sobbing
##due
##elt
##dled
collingwood
regeneration
flickering
shortest
##mount
##osi
feminism
##lat
sherlock
cabinets
fumbled
northbound
precedent
snaps
##mme
researching
##akes
guillaume
insights
manipulated
vapor
neighbour
sap
gangster
frey
f1
stalking
scarcely
callie
barnett
tendencies
audi
doomed
assessing
slung
panchayat
ambiguous
bartlett
##etto
distributing
violating
wolverhampton
##hetic
swami
histoire
##urus
liable
pounder
groin
hussain
larsen
popping
surprises
##atter
vie
curt
##station
mute
relocate
musicals
authorization
richter
##sef
immortality
tna
bombings
##press
deteriorated
yiddish
##acious
robbed
colchester
cs
pmid
ao
verified
balancing
apostle
swayed
recognizable
oxfordshire
retention
nottinghamshire
contender
judd
invitational
shrimp
uhf
##icient
cleaner
longitudinal
tanker
##mur
acronym
broker
koppen
sundance
suppliers
##gil
4000
clipped
fuels
petite
##anne
landslide
helene
diversion
populous
landowners
auspices
melville
quantitative
##xes
ferries
nicky
##llus
doo
haunting
roche
carver
downed
unavailable
##pathy
approximation
hiroshima
##hue
garfield
valle
comparatively
keyboardist
traveler
##eit
congestion
calculating
subsidiaries
##bate
serb
modernization
fairies
deepened
ville
averages
##lore
inflammatory
tonga
##itch
co₂
squads
##hea
gigantic
serum
enjoyment
retailer
verona
35th
cis
##phobic
magna
technicians
##vati
arithmetic
##sport
levin
##dation
amtrak
chow
sienna
##eyer
backstage
entrepreneurship
##otic
learnt
tao
##udy
worcestershire
formulation
baggage
hesitant
bali
sabotage
##kari
barren
enhancing
murmur
pl
freshly
putnam
syntax
aces
medicines
resentment
bandwidth
##sier
grins
chili
guido
##sei
framing
implying
gareth
lissa
genevieve
pertaining
admissions
geo
thorpe
proliferation
sato
bela
analyzing
parting
##gor
awakened
##isman
huddled
secrecy
##kling
hush
gentry
540
dungeons
##ego
coasts
##utz
sacrificed
##chule
landowner
mutually
prevalence
programmer
adolescent
disrupted
seaside
gee
trusts
vamp
georgie
##nesian
##iol
schedules
sindh
##market
etched
hm
sparse
bey
beaux
scratching
gliding
unidentified
216
collaborating
gems
jesuits
oro
accumulation
shaping
mbe
anal
##xin
231
enthusiasts
newscast
##egan
janata
dewey
parkinson
179
ankara
biennial
towering
dd
inconsistent
950
##chet
thriving
terminate
cabins
furiously
eats
advocating
donkey
marley
muster
phyllis
leiden
##user
grassland
glittering
iucn
loneliness
217
memorandum
armenians
##ddle
popularized
rhodesia
60s
lame
##illon
sans
bikini
header
orbits
##xx
##finger
##ulator
sharif
spines
biotechnology
strolled
naughty
yates
##wire
fremantle
milo
##mour
abducted
removes
##atin
humming
wonderland
##chrome
##ester
hume
pivotal
##rates
armand
grams
believers
elector
rte
apron
bis
scraped
##yria
endorsement
initials
##llation
eps
dotted
hints
buzzing
emigration
nearer
##tom
indicators
##ulu
coarse
neutron
protectorate
##uze
directional
exploits
pains
loire
1830s
proponents
guggenheim
rabbits
ritchie
305
hectare
inputs
hutton
##raz
verify
##ako
boilers
longitude
##lev
skeletal
yer
emilia
citrus
compromised
##gau
pokemon
prescription
paragraph
eduard
cadillac
attire
categorized
kenyan
weddings
charley
##bourg
entertain
monmouth
##lles
nutrients
davey
mesh
incentive
practised
ecosystems
kemp
subdued
overheard
##rya
bodily
maxim
##nius
apprenticeship
ursula
##fight
lodged
rug
silesian
unconstitutional
patel
inspected
coyote
unbeaten
##hak
34th
disruption
convict
parcel
##cl
##nham
collier
implicated
mallory
##iac
##lab
susannah
winkler
##rber
shia
phelps
sediments
graphical
robotic
##sner
adulthood
mart
smoked
##isto
kathryn
clarified
##aran
divides
convictions
oppression
pausing
burying
##mt
federico
mathias
eileen
##tana
kite
hunched
##acies
189
##atz
disadvantage
liza
kinetic
greedy
paradox
yokohama
dowager
trunks
ventured
##gement
gupta
vilnius
olaf
##thest
crimean
hopper
##ej
progressively
arturo
mouthed
arrondissement
##fusion
rubin
simulcast
oceania
##orum
##stra
##rred
busiest
intensely
navigator
cary
##vine
##hini
##bies
fife
rowe
rowland
posing
insurgents
shafts
lawsuits
activate
conor
inward
culturally
garlic
265
##eering
eclectic
##hui
##kee
##nl
furrowed
vargas
meteorological
rendezvous
##aus
culinary
commencement
##dition
quota
##notes
mommy
salaries
overlapping
mule
##iology
##mology
sums
wentworth
##isk
##zione
mainline
subgroup
##illy
hack
plaintiff
verdi
bulb
differentiation
engagements
multinational
supplemented
bertrand
caller
regis
##naire
##sler
##arts
##imated
blossom
propagation
kilometer
viaduct
vineyards
##uate
beckett
optimization
golfer
songwriters
seminal
semitic
thud
volatile
evolving
ridley
##wley
trivial
distributions
scandinavia
jiang
##ject
wrestled
insistence
##dio
emphasizes
napkin
##ods
adjunct
rhyme
##ricted
##eti
hopeless
surrounds
tremble
32nd
smoky
##ntly
oils
medicinal
padded
steer
wilkes
219
255
concessions
hue
uniquely
blinded
landon
yahoo
##lane
hendrix
commemorating
dex
specify
chicks
##ggio
intercity
1400
morley
##torm
highlighting
##oting
pang
oblique
stalled
##liner
flirting
newborn
1769
bishopric
shaved
232
currie
##ush
dharma
spartan
##ooped
favorites
smug
novella
sirens
abusive
creations
espana
##lage
paradigm
semiconductor
sheen
##rdo
##yen
##zak
nrl
renew
##pose
##tur
adjutant
marches
norma
##enity
ineffective
weimar
grunt
##gat
lordship
plotting
expenditure
infringement
lbs
refrain
av
mimi
mistakenly
postmaster
1771
##bara
ras
motorsports
tito
199
subjective
##zza
bully
stew
##kaya
prescott
1a
##raphic
##zam
bids
styling
paranormal
reeve
sneaking
exploding
katz
akbar
migrant
syllables
indefinitely
##ogical
destroys
replaces
applause
##phine
pest
##fide
218
articulated
bertie
##thing
##cars
##ptic
courtroom
crowley
aesthetics
cummings
tehsil
hormones
titanic
dangerously
##ibe
stadion
jaenelle
auguste
ciudad
##chu
mysore
partisans
##sio
lucan
philipp
##aly
debating
henley
interiors
##rano
##tious
homecoming
beyonce
usher
henrietta
prepares
weeds
##oman
ely
plucked
##pire
##dable
luxurious
##aq
artifact
password
pasture
juno
maddy
minsk
##dder
##ologies
##rone
assessments
martian
royalist
1765
examines
##mani
##rge
nino
223
parry
scooped
relativity
##eli
##uting
##cao
congregational
noisy
traverse
##agawa
strikeouts
nickelodeon
obituary
transylvania
binds
depictions
polk
trolley
##yed
##lard
breeders
##under
dryly
hokkaido
1762
strengths
stacks
bonaparte
connectivity
neared
prostitutes
stamped
anaheim
gutierrez
sinai
##zzling
bram
fresno
madhya
##86
proton
##lena
##llum
##phon
reelected
wanda
##anus
##lb
ample
distinguishing
##yler
grasping
sermons
tomato
bland
stimulation
avenues
##eux
spreads
scarlett
fern
pentagon
assert
baird
chesapeake
ir
calmed
distortion
fatalities
##olis
correctional
pricing
##astic
##gina
prom
dammit
ying
collaborate
##chia
welterweight
33rd
pointer
substitution
bonded
umpire
communicating
multitude
paddle
##obe
federally
intimacy
##insky
betray
ssr
##lett
##lean
##lves
##therapy
airbus
##tery
functioned
ud
bearer
biomedical
netflix
##hire
##nca
condom
brink
ik
##nical
macy
##bet
flap
gma
experimented
jelly
lavender
##icles
##ulia
munro
##mian
##tial
rye
##rle
60th
gigs
hottest
rotated
predictions
fuji
bu
##erence
##omi
barangay
##fulness
##sas
clocks
##rwood
##liness
cereal
roe
wight
decker
uttered
babu
onion
xml
forcibly
##df
petra
sarcasm
hartley
peeled
storytelling
##42
##xley
##ysis
##ffa
fibre
kiel
auditor
fig
harald
greenville
##berries
geographically
nell
quartz
##athic
cemeteries
##lr
crossings
nah
holloway
reptiles
chun
sichuan
snowy
660
corrections
##ivo
zheng
ambassadors
blacksmith
fielded
fluids
hardcover
turnover
medications
melvin
academies
##erton
ro
roach
absorbing
spaniards
colton
##founded
outsider
espionage
kelsey
245
edible
##ulf
dora
establishes
##sham
##tries
contracting
##tania
cinematic
costello
nesting
##uron
connolly
duff
##nology
mma
##mata
fergus
sexes
gi
optics
spectator
woodstock
banning
##hee
##fle
differentiate
outfielder
refinery
226
312
gerhard
horde
lair
drastically
##udi
landfall
##cheng
motorsport
odi
##achi
predominant
quay
skins
##ental
edna
harshly
complementary
murdering
##aves
wreckage
##90
ono
outstretched
lennox
munitions
galen
reconcile
470
scalp
bicycles
gillespie
questionable
rosenberg
guillermo
hostel
jarvis
kabul
volvo
opium
yd
##twined
abuses
decca
outpost
##cino
sensible
neutrality
##64
ponce
anchorage
atkins
turrets
inadvertently
disagree
libre
vodka
reassuring
weighs
##yal
glide
jumper
ceilings
repertory
outs
stain
##bial
envy
##ucible
smashing
heightened
policing
hyun
mixes
lai
prima
##ples
celeste
##bina
lucrative
intervened
kc
manually
##rned
stature
staffed
bun
bastards
nairobi
priced
##auer
thatcher
##kia
tripped
comune
##ogan
##pled
brasil
incentives
emanuel
hereford
musica
##kim
benedictine
biennale
##lani
eureka
gardiner
rb
knocks
sha
##ael
##elled
##onate
efficacy
ventura
masonic
sanford
maize
leverage
##feit
capacities
santana
##aur
novelty
vanilla
##cter
##tour
benin
##oir
##rain
neptune
drafting
tallinn
##cable
humiliation
##boarding
schleswig
fabian
bernardo
liturgy
spectacle
sweeney
pont
routledge
##tment
cosmos
ut
hilt
sleek
universally
##eville
##gawa
typed
##dry
favors
allegheny
glaciers
##rly
recalling
aziz
##log
parasite
requiem
auf
##berto
##llin
illumination
##breaker
##issa
festivities
bows
govern
vibe
vp
333
sprawled
larson
pilgrim
bwf
leaping
##rts
##ssel
alexei
greyhound
hoarse
##dler
##oration
seneca
##cule
gaping
##ulously
##pura
cinnamon
##gens
##rricular
craven
fantasies
houghton
engined
reigned
dictator
supervising
##oris
bogota
commentaries
unnatural
fingernails
spirituality
tighten
##tm
canadiens
protesting
intentional
cheers
sparta
##ytic
##iere
##zine
widen
belgarath
controllers
dodd
iaaf
navarre
##ication
defect
squire
steiner
whisky
##mins
560
inevitably
tome
##gold
chew
##uid
##lid
elastic
##aby
streaked
alliances
jailed
regal
##ined
##phy
czechoslovak
narration
absently
##uld
bluegrass
guangdong
quran
criticizing
hose
hari
##liest
##owa
skier
streaks
deploy
##lom
raft
bose
dialed
huff
##eira
haifa
simplest
bursting
endings
ib
sultanate
##titled
franks
whitman
ensures
sven
##ggs
collaborators
forster
organising
ui
banished
napier
injustice
teller
layered
thump
##otti
roc
battleships
evidenced
fugitive
sadie
robotics
##roud
equatorial
geologist
##iza
yielding
##bron
##sr
internationale
mecca
##diment
sbs
skyline
toad
uploaded
reflective
undrafted
lal
leafs
bayern
##dai
lakshmi
shortlisted
##stick
##wicz
camouflage
donate
af
christi
lau
##acio
disclosed
nemesis
1761
assemble
straining
northamptonshire
tal
##asi
bernardino
premature
heidi
42nd
coefficients
galactic
reproduce
buzzed
sensations
zionist
monsieur
myrtle
##eme
archery
strangled
musically
viewpoint
antiquities
bei
trailers
seahawks
cured
pee
preferring
tasmanian
lange
sul
##mail
##working
colder
overland
lucivar
massey
gatherings
haitian
##smith
disapproval
flaws
##cco
##enbach
1766
npr
##icular
boroughs
creole
forums
techno
1755
dent
abdominal
streetcar
##eson
##stream
procurement
gemini
predictable
##tya
acheron
christoph
feeder
fronts
vendor
bernhard
jammu
tumors
slang
##uber
goaltender
twists
curving
manson
vuelta
mer
peanut
confessions
pouch
unpredictable
allowance
theodor
vascular
##factory
bala
authenticity
metabolic
coughing
nanjing
##cea
pembroke
##bard
splendid
36th
ff
hourly
##ahu
elmer
handel
##ivate
awarding
thrusting
dl
experimentation
##hesion
##46
caressed
entertained
steak
##rangle
biologist
orphans
baroness
oyster
stepfather
##dridge
mirage
reefs
speeding
##31
barons
1764
227
inhabit
preached
repealed
##tral
honoring
boogie
captives
administer
johanna
##imate
gel
suspiciously
1767
sobs
##dington
backbone
hayward
garry
##folding
##nesia
maxi
##oof
##ppe
ellison
galileo
##stand
crimea
frenzy
amour
bumper
matrices
natalia
baking
garth
palestinians
##grove
smack
conveyed
ensembles
gardening
##manship
##rup
##stituting
1640
harvesting
topography
jing
shifters
dormitory
##carriage
##lston
ist
skulls
##stadt
dolores
jewellery
sarawak
##wai
##zier
fences
christy
confinement
tumbling
credibility
fir
stench
##bria
##plication
##nged
##sam
virtues
##belt
marjorie
pba
##eem
##made
celebrates
schooner
agitated
barley
fulfilling
anthropologist
##pro
restrict
novi
regulating
##nent
padres
##rani
##hesive
loyola
tabitha
milky
olson
proprietor
crambidae
guarantees
intercollegiate
ljubljana
hilda
##sko
ignorant
hooded
##lts
sardinia
##lidae
##vation
frontman
privileged
witchcraft
##gp
jammed
laude
poking
##than
bracket
amazement
yunnan
##erus
maharaja
linnaeus
264
commissioning
milano
peacefully
##logies
akira
rani
regulator
##36
grasses
##rance
luzon
crows
compiler
gretchen
seaman
edouard
tab
buccaneers
ellington
hamlets
whig
socialists
##anto
directorial
easton
mythological
##kr
##vary
rhineland
semantic
taut
dune
inventions
succeeds
##iter
replication
branched
##pired
jul
prosecuted
kangaroo
penetrated
##avian
middlesbrough
doses
bleak
madam
predatory
relentless
##vili
reluctance
##vir
hailey
crore
silvery
1759
monstrous
swimmers
transmissions
hawthorn
informing
##eral
toilets
caracas
crouch
kb
##sett
295
cartel
hadley
##aling
alexia
yvonne
##biology
cinderella
eton
superb
blizzard
stabbing
industrialist
maximus
##gm
##orus
groves
maud
clade
oversized
comedic
##bella
rosen
nomadic
fulham
montane
beverages
galaxies
redundant
swarm
##rot
##folia
##llis
buckinghamshire
fen
bearings
bahadur
##rom
gilles
phased
dynamite
faber
benoit
vip
##ount
##wd
booking
fractured
tailored
anya
spices
westwood
cairns
auditions
inflammation
steamed
##rocity
##acion
##urne
skyla
thereof
watford
torment
archdeacon
transforms
lulu
demeanor
fucked
serge
##sor
mckenna
minas
entertainer
##icide
caress
originate
residue
##sty
1740
##ilised
##org
beech
##wana
subsidies
##ghton
emptied
gladstone
ru
firefighters
voodoo
##rcle
het
nightingale
tamara
edmond
ingredient
weaknesses
silhouette
285
compatibility
withdrawing
hampson
##mona
anguish
giggling
##mber
bookstore
##jiang
southernmost
tilting
##vance
bai
economical
rf
briefcase
dreadful
hinted
projections
shattering
totaling
##rogate
analogue
indicted
periodical
fullback
##dman
haynes
##tenberg
##ffs
##ishment
1745
thirst
stumble
penang
vigorous
##ddling
##kor
##lium
octave
##ove
##enstein
##inen
##ones
siberian
##uti
cbn
repeal
swaying
##vington
khalid
tanaka
unicorn
otago
plastered
lobe
riddle
##rella
perch
##ishing
croydon
filtered
graeme
tripoli
##ossa
crocodile
##chers
sufi
mined
##tung
inferno
lsu
##phi
swelled
utilizes
£2
cale
periodicals
styx
hike
informally
coop
lund
##tidae
ala
hen
qui
transformations
disposed
sheath
chickens
##cade
fitzroy
sas
silesia
unacceptable
odisha
1650
sabrina
pe
spokane
ratios
athena
massage
shen
dilemma
##drum
##riz
##hul
corona
doubtful
niall
##pha
##bino
fines
cite
acknowledging
bangor
ballard
bathurst
##resh
huron
mustered
alzheimer
garments
kinase
tyre
warship
##cp
flashback
pulmonary
braun
cheat
kamal
cyclists
constructions
grenades
ndp
traveller
excuses
stomped
signalling
trimmed
futsal
mosques
relevance
##wine
wta
##23
##vah
##lter
hoc
##riding
optimistic
##´s
deco
sim
interacting
rejecting
moniker
waterways
##ieri
##oku
mayors
gdansk
outnumbered
pearls
##ended
##hampton
fairs
totals
dominating
262
notions
stairway
compiling
pursed
commodities
grease
yeast
##jong
carthage
griffiths
residual
amc
contraction
laird
sapphire
##marine
##ivated
amalgamation
dissolve
inclination
lyle
packaged
altitudes
suez
canons
graded
lurched
narrowing
boasts
guise
wed
enrico
##ovsky
rower
scarred
bree
cub
iberian
protagonists
bargaining
proposing
trainers
voyages
vans
fishes
##aea
##ivist
##verance
encryption
artworks
kazan
sabre
cleopatra
hepburn
rotting
supremacy
mecklenburg
##brate
burrows
hazards
outgoing
flair
organizes
##ctions
scorpion
##usions
boo
234
chevalier
dunedin
slapping
##34
ineligible
pensions
##38
##omic
manufactures
emails
bismarck
238
weakening
blackish
ding
mcgee
quo
##rling
northernmost
xx
manpower
greed
sampson
clicking
##ange
##horpe
##inations
##roving
torre
##eptive
##moral
symbolism
38th
asshole
meritorious
outfits
splashed
biographies
sprung
astros
##tale
302
737
filly
raoul
nw
tokugawa
linden
clubhouse
##apa
tracts
romano
##pio
putin
tags
##note
chained
dickson
gunshot
moe
gunn
rashid
##tails
zipper
##bas
##nea
contrasted
##ply
##udes
plum
pharaoh
##pile
aw
comedies
ingrid
sandwiches
subdivisions
1100
mariana
nokia
kamen
hz
delaney
veto
herring
##words
possessive
outlines
##roup
siemens
stairwell
rc
gallantry
messiah
palais
yells
233
zeppelin
##dm
bolivar
##cede
smackdown
mckinley
##mora
##yt
muted
geologic
finely
unitary
avatar
hamas
maynard
rees
bog
contrasting
##rut
liv
chico
disposition
pixel
##erate
becca
dmitry
yeshiva
narratives
##lva
##ulton
mercenary
sharpe
tempered
navigate
stealth
amassed
keynes
##lini
untouched
##rrie
havoc
lithium
##fighting
abyss
graf
southward
wolverine
balloons
implements
ngos
transitions
##icum
ambushed
concacaf
dormant
economists
##dim
costing
csi
rana
universite
boulders
verity
##llon
collin
mellon
misses
cypress
fluorescent
lifeless
spence
##ulla
crewe
shepard
pak
revelations
##م
jolly
gibbons
paw
##dro
##quel
freeing
##test
shack
fries
palatine
##51
##hiko
accompaniment
cruising
recycled
##aver
erwin
sorting
synthesizers
dyke
realities
sg
strides
enslaved
wetland
##ghan
competence
gunpowder
grassy
maroon
reactors
objection
##oms
carlson
gearbox
macintosh
radios
shelton
##sho
clergyman
prakash
254
mongols
trophies
oricon
228
stimuli
twenty20
cantonese
cortes
mirrored
##saurus
bhp
cristina
melancholy
##lating
enjoyable
nuevo
##wny
downfall
schumacher
##ind
banging
lausanne
rumbled
paramilitary
reflex
ax
amplitude
migratory
##gall
##ups
midi
barnard
lastly
sherry
##hp
##nall
keystone
##kra
carleton
slippery
##53
coloring
foe
socket
otter
##rgos
mats
##tose
consultants
bafta
bison
topping
##km
490
primal
abandonment
transplant
atoll
hideous
mort
pained
reproduced
tae
howling
##turn
unlawful
billionaire
hotter
poised
lansing
##chang
dinamo
retro
messing
nfc
domesday
##mina
blitz
timed
##athing
##kley
ascending
gesturing
##izations
signaled
tis
chinatown
mermaid
savanna
jameson
##aint
catalina
##pet
##hers
cochrane
cy
chatting
##kus
alerted
computation
mused
noelle
majestic
mohawk
campo
octagonal
##sant
##hend
241
aspiring
##mart
comprehend
iona
paralyzed
shimmering
swindon
rhone
##eley
reputed
configurations
pitchfork
agitation
francais
gillian
lipstick
##ilo
outsiders
pontifical
resisting
bitterness
sewer
rockies
##edd
##ucher
misleading
1756
exiting
galloway
##nging
risked
##heart
246
commemoration
schultz
##rka
integrating
##rsa
poses
shrieked
##weiler
guineas
gladys
jerking
owls
goldsmith
nightly
penetrating
##unced
lia
##33
ignited
betsy
##aring
##thorpe
follower
vigorously
##rave
coded
kiran
knit
zoology
tbilisi
##28
##bered
repository
govt
deciduous
dino
growling
##bba
enhancement
unleashed
chanting
pussy
biochemistry
##eric
kettle
repression
toxicity
nrhp
##arth
##kko
##bush
ernesto
commended
outspoken
242
mca
parchment
sms
kristen
##aton
bisexual
raked
glamour
navajo
a2
conditioned
showcased
##hma
spacious
youthful
##esa
usl
appliances
junta
brest
layne
conglomerate
enchanted
chao
loosened
picasso
circulating
inspect
montevideo
##centric
##kti
piazza
spurred
##aith
bari
freedoms
poultry
stamford
lieu
##ect
indigo
sarcastic
bahia
stump
attach
dvds
frankenstein
lille
approx
scriptures
pollen
##script
nmi
overseen
##ivism
tides
proponent
newmarket
inherit
milling
##erland
centralized
##rou
distributors
credentials
drawers
abbreviation
##lco
##xon
downing
uncomfortably
ripe
##oes
erase
franchises
##ever
populace
##bery
##khar
decomposition
pleas
##tet
daryl
sabah
##stle
##wide
fearless
genie
lesions
annette
##ogist
oboe
appendix
nair
dripped
petitioned
maclean
mosquito
parrot
rpg
hampered
1648
operatic
reservoirs
##tham
irrelevant
jolt
summarized
##fp
medallion
##taff
##−
clawed
harlow
narrower
goddard
marcia
bodied
fremont
suarez
altering
tempest
mussolini
porn
##isms
sweetly
oversees
walkers
solitude
grimly
shrines
hk
ich
supervisors
hostess
dietrich
legitimacy
brushes
expressive
##yp
dissipated
##rse
localized
systemic
##nikov
gettysburg
##js
##uaries
dialogues
muttering
251
housekeeper
sicilian
discouraged
##frey
beamed
kaladin
halftime
kidnap
##amo
##llet
1754
synonymous
depleted
instituto
insulin
reprised
##opsis
clashed
##ctric
interrupting
radcliffe
insisting
medici
1715
ejected
playfully
turbulent
##47
starvation
##rini
shipment
rebellious
petersen
verification
merits
##rified
cakes
##charged
1757
milford
shortages
spying
fidelity
##aker
emitted
storylines
harvested
seismic
##iform
cheung
kilda
theoretically
barbie
lynx
##rgy
##tius
goblin
mata
poisonous
##nburg
reactive
residues
obedience
##евич
conjecture
##rac
401
hating
sixties
kicker
moaning
motown
##bha
emancipation
neoclassical
##hering
consoles
ebert
professorship
##tures
sustaining
assaults
obeyed
affluent
incurred
tornadoes
##eber
##zow
emphasizing
highlanders
cheated
helmets
##ctus
internship
terence
bony
executions
legislators
berries
peninsular
tinged
##aco
1689
amplifier
corvette
ribbons
lavish
pennant
##lander
worthless
##chfield
##forms
mariano
pyrenees
expenditures
##icides
chesterfield
mandir
tailor
39th
sergey
nestled
willed
aristocracy
devotees
goodnight
raaf
rumored
weaponry
remy
appropriations
harcourt
burr
riaa
##lence
limitation
unnoticed
guo
soaking
swamps
##tica
collapsing
tatiana
descriptive
brigham
psalm
##chment
maddox
##lization
patti
caliph
##aja
akron
injuring
serra
##ganj
basins
##sari
astonished
launcher
##church
hilary
wilkins
sewing
##sf
stinging
##fia
##ncia
underwood
startup
##ition
compilations
vibrations
embankment
jurist
##nity
bard
juventus
groundwater
kern
palaces
helium
boca
cramped
marissa
soto
##worm
jae
princely
##ggy
faso
bazaar
warmly
##voking
229
pairing
##lite
##grate
##nets
wien
freaked
ulysses
rebirth
##alia
##rent
mummy
guzman
jimenez
stilled
##nitz
trajectory
tha
woken
archival
professions
##pts
##pta
hilly
shadowy
shrink
##bolt
norwood
glued
migrate
stereotypes
devoid
##pheus
625
evacuate
horrors
infancy
gotham
knowles
optic
downloaded
sachs
kingsley
parramatta
darryl
mor
##onale
shady
commence
confesses
kan
##meter
##placed
marlborough
roundabout
regents
frigates
io
##imating
gothenburg
revoked
carvings
clockwise
convertible
intruder
##sche
banged
##ogo
vicky
bourgeois
##mony
dupont
footing
##gum
pd
##real
buckle
yun
penthouse
sane
720
serviced
stakeholders
neumann
bb
##eers
comb
##gam
catchment
pinning
rallies
typing
##elles
forefront
freiburg
sweetie
giacomo
widowed
goodwill
worshipped
aspirations
midday
##vat
fishery
##trick
bournemouth
turk
243
hearth
ethanol
guadalajara
murmurs
sl
##uge
afforded
scripted
##hta
wah
##jn
coroner
translucent
252
memorials
puck
progresses
clumsy
##race
315
candace
recounted
##27
##slin
##uve
filtering
##mac
howl
strata
heron
leveled
##ays
dubious
##oja
##т
##wheel
citations
exhibiting
##laya
##mics
##pods
turkic
##lberg
injunction
##ennial
##mit
antibodies
##44
organise
##rigues
cardiovascular
cushion
inverness
##zquez
dia
cocoa
sibling
##tman
##roid
expanse
feasible
tunisian
algiers
##relli
rus
bloomberg
dso
westphalia
bro
tacoma
281
downloads
##ours
konrad
duran
##hdi
continuum
jett
compares
legislator
secession
##nable
##gues
##zuka
translating
reacher
##gley
##ła
aleppo
##agi
tc
orchards
trapping
linguist
versatile
drumming
postage
calhoun
superiors
##mx
barefoot
leary
##cis
ignacio
alfa
kaplan
##rogen
bratislava
mori
##vot
disturb
haas
313
cartridges
gilmore
radiated
salford
tunic
hades
##ulsive
archeological
delilah
magistrates
auditioned
brewster
charters
empowerment
blogs
cappella
dynasties
iroquois
whipping
##krishna
raceway
truths
myra
weaken
judah
mcgregor
##horse
mic
refueling
37th
burnley
bosses
markus
premio
query
##gga
dunbar
##economic
darkest
lyndon
sealing
commendation
reappeared
##mun
addicted
ezio
slaughtered
satisfactory
shuffle
##eves
##thic
##uj
fortification
warrington
##otto
resurrected
fargo
mane
##utable
##lei
##space
foreword
ox
##aris
##vern
abrams
hua
##mento
sakura
##alo
uv
sentimental
##skaya
midfield
##eses
sturdy
scrolls
macleod
##kyu
entropy
##lance
mitochondrial
cicero
excelled
thinner
convoys
perceive
##oslav
##urable
systematically
grind
burkina
287
##tagram
ops
##aman
guantanamo
##cloth
##tite
forcefully
wavy
##jou
pointless
##linger
##tze
layton
portico
superficial
clerical
outlaws
##hism
burials
muir
##inn
creditors
hauling
rattle
##leg
calais
monde
archers
reclaimed
dwell
wexford
hellenic
falsely
remorse
##tek
dough
furnishings
##uttered
gabon
neurological
novice
##igraphy
contemplated
pulpit
nightstand
saratoga
##istan
documenting
pulsing
taluk
##firmed
busted
marital
##rien
disagreements
wasps
##yes
hodge
mcdonnell
mimic
fran
pendant
dhabi
musa
##nington
congratulations
argent
darrell
concussion
losers
regrets
thessaloniki
reversal
donaldson
hardwood
thence
achilles
ritter
##eran
demonic
jurgen
prophets
goethe
eki
classmate
buff
##cking
yank
irrational
##inging
perished
seductive
qur
sourced
##crat
##typic
mustard
ravine
barre
horizontally
characterization
phylogenetic
boise
##dit
##runner
##tower
brutally
intercourse
seduce
##bbing
fay
ferris
ogden
amar
nik
unarmed
##inator
evaluating
kyrgyzstan
sweetness
##lford
##oki
mccormick
meiji
notoriety
stimulate
disrupt
figuring
instructional
mcgrath
##zoo
groundbreaking
##lto
flinch
khorasan
agrarian
bengals
mixer
radiating
##sov
ingram
pitchers
nad
tariff
##cript
tata
##codes
##emi
##ungen
appellate
lehigh
##bled
##giri
brawl
duct
texans
##ciation
##ropolis
skipper
speculative
vomit
doctrines
stresses
253
davy
graders
whitehead
jozef
timely
cumulative
haryana
paints
appropriately
boon
cactus
##ales
##pid
dow
legions
##pit
perceptions
1730
picturesque
##yse
periphery
rune
wr
##aha
celtics
sentencing
whoa
##erin
confirms
variance
425
moines
mathews
spade
rave
m1
fronted
fx
blending
alleging
reared
##gl
237
##paper
grassroots
eroded
##free
##physical
directs
ordeal
##sław
accelerate
hacker
rooftop
##inia
lev
buys
cebu
devote
##lce
specialising
##ulsion
choreographed
repetition
warehouses
##ryl
paisley
tuscany
analogy
sorcerer
hash
huts
shards
descends
exclude
nix
chaplin
gaga
ito
vane
##drich
causeway
misconduct
limo
orchestrated
glands
jana
##kot
u2
##mple
##sons
branching
contrasts
scoop
longed
##virus
chattanooga
##75
syrup
cornerstone
##tized
##mind
##iaceae
careless
precedence
frescoes
##uet
chilled
consult
modelled
snatch
peat
##thermal
caucasian
humane
relaxation
spins
temperance
##lbert
occupations
lambda
hybrids
moons
mp3
##oese
247
rolf
societal
yerevan
ness
##ssler
befriended
mechanized
nominate
trough
boasted
cues
seater
##hom
bends
##tangle
conductors
emptiness
##lmer
eurasian
adriatic
tian
##cie
anxiously
lark
propellers
chichester
jock
ev
2a
##holding
credible
recounts
tori
loyalist
abduction
##hoot
##redo
nepali
##mite
ventral
tempting
##ango
##crats
steered
##wice
javelin
dipping
laborers
prentice
looming
titanium
##ː
badges
emir
tensor
##ntation
egyptians
rash
denies
hawthorne
lombard
showers
wehrmacht
dietary
trojan
##reus
welles
executing
horseshoe
lifeboat
##lak
elsa
infirmary
nearing
roberta
boyer
mutter
trillion
joanne
##fine
##oked
sinks
vortex
uruguayan
clasp
sirius
##block
accelerator
prohibit
sunken
byu
chronological
diplomats
ochreous
510
symmetrical
1644
maia
##tology
salts
reigns
atrocities
##ия
hess
bared
issn
##vyn
cater
saturated
##cycle
##isse
sable
voyager
dyer
yusuf
##inge
fountains
wolff
##39
##nni
engraving
rollins
atheist
ominous
##ault
herr
chariot
martina
strung
##fell
##farlane
horrific
sahib
gazes
saetan
erased
ptolemy
##olic
flushing
lauderdale
analytic
##ices
530
navarro
beak
gorilla
herrera
broom
guadalupe
raiding
sykes
311
bsc
deliveries
1720
invasions
carmichael
tajikistan
thematic
ecumenical
sentiments
onstage
##rians
##brand
##sume
catastrophic
flanks
molten
##arns
waller
aimee
terminating
##icing
alternately
##oche
nehru
printers
outraged
##eving
empires
template
banners
repetitive
za
##oise
vegetarian
##tell
guiana
opt
cavendish
lucknow
synthesized
##hani
##mada
finalized
##ctable
fictitious
mayoral
unreliable
##enham
embracing
peppers
rbis
##chio
##neo
inhibition
slashed
togo
orderly
embroidered
safari
salty
236
barron
benito
totaled
##dak
pubs
simulated
caden
devin
tolkien
momma
welding
sesame
##ept
gottingen
hardness
630
shaman
temeraire
620
adequately
pediatric
##kit
ck
assertion
radicals
composure
cadence
seafood
beaufort
lazarus
mani
warily
cunning
kurdistan
249
cantata
##kir
ares
##41
##clusive
nape
townland
geared
insulted
flutter
boating
violate
draper
dumping
malmo
##hh
##romatic
firearm
alta
bono
obscured
##clave
exceeds
panorama
unbelievable
##train
preschool
##essed
disconnected
installing
rescuing
secretaries
accessibility
##castle
##drive
##ifice
##film
bouts
slug
waterway
mindanao
##buro
##ratic
halves
##ل
calming
liter
maternity
adorable
bragg
electrification
mcc
##dote
roxy
schizophrenia
##body
munoz
kaye
whaling
239
mil
tingling
tolerant
##ago
unconventional
volcanoes
##finder
deportivo
##llie
robson
kaufman
neuroscience
wai
deportation
masovian
scraping
converse
##bh
hacking
bulge
##oun
administratively
yao
580
amp
mammoth
booster
claremont
hooper
nomenclature
pursuits
mclaughlin
melinda
##sul
catfish
barclay
substrates
taxa
zee
originals
kimberly
packets
padma
##ality
borrowing
ostensibly
solvent
##bri
##genesis
##mist
lukas
shreveport
veracruz
##ь
##lou
##wives
cheney
tt
anatolia
hobbs
##zyn
cyclic
radiant
alistair
greenish
siena
dat
independents
##bation
conform
pieter
hyper
applicant
bradshaw
spores
telangana
vinci
inexpensive
nuclei
322
jang
nme
soho
spd
##ign
cradled
receptionist
pow
##43
##rika
fascism
##ifer
experimenting
##ading
##iec
##region
345
jocelyn
maris
stair
nocturnal
toro
constabulary
elgin
##kker
msc
##giving
##schen
##rase
doherty
doping
sarcastically
batter
maneuvers
##cano
##apple
##gai
##git
intrinsic
##nst
##stor
1753
showtime
cafes
gasps
lviv
ushered
##thed
fours
restart
astonishment
transmitting
flyer
shrugs
##sau
intriguing
cones
dictated
mushrooms
medial
##kovsky
##elman
escorting
gaped
##26
godfather
##door
##sell
djs
recaptured
timetable
vila
1710
3a
aerodrome
mortals
scientology
##orne
angelina
mag
convection
unpaid
insertion
intermittent
lego
##nated
endeavor
kota
pereira
##lz
304
bwv
glamorgan
insults
agatha
fey
##cend
fleetwood
mahogany
protruding
steamship
zeta
##arty
mcguire
suspense
##sphere
advising
urges
##wala
hurriedly
meteor
gilded
inline
arroyo
stalker
##oge
excitedly
revered
##cure
earle
introductory
##break
##ilde
mutants
puff
pulses
reinforcement
##haling
curses
lizards
stalk
correlated
##fixed
fallout
macquarie
##unas
bearded
denton
heaving
802
##ocation
winery
assign
dortmund
##lkirk
everest
invariant
charismatic
susie
##elling
bled
lesley
telegram
sumner
bk
##ogen
##к
wilcox
needy
colbert
duval
##iferous
##mbled
allotted
attends
imperative
##hita
replacements
hawker
##inda
insurgency
##zee
##eke
casts
##yla
680
ives
transitioned
##pack
##powering
authoritative
baylor
flex
cringed
plaintiffs
woodrow
##skie
drastic
ape
aroma
unfolded
commotion
nt
preoccupied
theta
routines
lasers
privatization
wand
domino
ek
clenching
nsa
strategically
showered
bile
handkerchief
pere
storing
christophe
insulting
316
nakamura
romani
asiatic
magdalena
palma
cruises
stripping
405
konstantin
soaring
##berman
colloquially
forerunner
havilland
incarcerated
parasites
sincerity
##utus
disks
plank
saigon
##ining
corbin
homo
ornaments
powerhouse
##tlement
chong
fastened
feasibility
idf
morphological
usable
##nish
##zuki
aqueduct
jaguars
keepers
##flies
aleksandr
faust
assigns
ewing
bacterium
hurled
tricky
hungarians
integers
wallis
321
yamaha
##isha
hushed
oblivion
aviator
evangelist
friars
##eller
monograph
ode
##nary
airplanes
labourers
charms
##nee
1661
hagen
tnt
rudder
fiesta
transcript
dorothea
ska
inhibitor
maccabi
retorted
raining
encompassed
clauses
menacing
1642
lineman
##gist
vamps
##ape
##dick
gloom
##rera
dealings
easing
seekers
##nut
##pment
helens
unmanned
##anu
##isson
basics
##amy
##ckman
adjustments
1688
brutality
horne
##zell
sui
##55
##mable
aggregator
##thal
rhino
##drick
##vira
counters
zoom
##01
##rting
mn
montenegrin
packard
##unciation
##♭
##kki
reclaim
scholastic
thugs
pulsed
##icia
syriac
quan
saddam
banda
kobe
blaming
buddies
dissent
##lusion
##usia
corbett
jaya
delle
erratic
lexie
##hesis
435
amiga
hermes
##pressing
##leen
chapels
gospels
jamal
##uating
compute
revolving
warp
##sso
##thes
armory
##eras
##gol
antrim
loki
##kow
##asian
##good
##zano
braid
handwriting
subdistrict
funky
pantheon
##iculate
concurrency
estimation
improper
juliana
##his
newcomers
johnstone
staten
communicated
##oco
##alle
sausage
stormy
##stered
##tters
superfamily
##grade
acidic
collateral
tabloid
##oped
##rza
bladder
austen
##ellant
mcgraw
##hay
hannibal
mein
aquino
lucifer
wo
badger
boar
cher
christensen
greenberg
interruption
##kken
jem
244
mocked
bottoms
cambridgeshire
##lide
sprawling
##bbly
eastwood
ghent
synth
##buck
advisers
##bah
nominally
hapoel
qu
daggers
estranged
fabricated
towels
vinnie
wcw
misunderstanding
anglia
nothin
unmistakable
##dust
##lova
chilly
marquette
truss
##edge
##erine
reece
##lty
##chemist
##connected
272
308
41st
bash
raion
waterfalls
##ump
##main
labyrinth
queue
theorist
##istle
bharatiya
flexed
soundtracks
rooney
leftist
patrolling
wharton
plainly
alleviate
eastman
schuster
topographic
engages
immensely
unbearable
fairchild
1620
dona
lurking
parisian
oliveira
ia
indictment
hahn
bangladeshi
##aster
vivo
##uming
##ential
antonia
expects
indoors
kildare
harlan
##logue
##ogenic
##sities
forgiven
##wat
childish
tavi
##mide
##orra
plausible
grimm
successively
scooted
##bola
##dget
##rith
spartans
emery
flatly
azure
epilogue
##wark
flourish
##iny
##tracted
##overs
##oshi
bestseller
distressed
receipt
spitting
hermit
topological
##cot
drilled
subunit
francs
##layer
eel
##fk
##itas
octopus
footprint
petitions
ufo
##say
##foil
interfering
leaking
palo
##metry
thistle
valiant
##pic
narayan
mcpherson
##fast
gonzales
##ym
##enne
dustin
novgorod
solos
##zman
doin
##raph
##patient
##meyer
soluble
ashland
cuffs
carole
pendleton
whistling
vassal
##river
deviation
revisited
constituents
rallied
rotate
loomed
##eil
##nting
amateurs
augsburg
auschwitz
crowns
skeletons
##cona
bonnet
257
dummy
globalization
simeon
sleeper
mandal
differentiated
##crow
##mare
milne
bundled
exasperated
talmud
owes
segregated
##feng
##uary
dentist
piracy
props
##rang
devlin
##torium
malicious
paws
##laid
dependency
##ergy
##fers
##enna
258
pistons
rourke
jed
grammatical
tres
maha
wig
512
ghostly
jayne
##achal
##creen
##ilis
##lins
##rence
designate
##with
arrogance
cambodian
clones
showdown
throttle
twain
##ception
lobes
metz
nagoya
335
braking
##furt
385
roaming
##minster
amin
crippled
##37
##llary
indifferent
hoffmann
idols
intimidating
1751
261
influenza
memo
onions
1748
bandage
consciously
##landa
##rage
clandestine
observes
swiped
tangle
##ener
##jected
##trum
##bill
##lta
hugs
congresses
josiah
spirited
##dek
humanist
managerial
filmmaking
inmate
rhymes
debuting
grimsby
ur
##laze
duplicate
vigor
##tf
republished
bolshevik
refurbishment
antibiotics
martini
methane
newscasts
royale
horizons
levant
iain
visas
##ischen
paler
##around
manifestation
snuck
alf
chop
futile
pedestal
rehab
##kat
bmg
kerman
res
fairbanks
jarrett
abstraction
saharan
##zek
1746
procedural
clearer
kincaid
sash
luciano
##ffey
crunch
helmut
##vara
revolutionaries
##tute
creamy
leach
##mmon
1747
permitting
nes
plight
wendell
##lese
contra
ts
clancy
ipa
mach
staples
autopsy
disturbances
nueva
karin
pontiac
##uding
proxy
venerable
haunt
leto
bergman
expands
##helm
wal
##pipe
canning
celine
cords
obesity
##enary
intrusion
planner
##phate
reasoned
sequencing
307
harrow
##chon
##dora
marred
mcintyre
repay
tarzan
darting
248
harrisburg
margarita
repulsed
##hur
##lding
belinda
hamburger
novo
compliant
runways
bingham
registrar
skyscraper
ic
cuthbert
improvisation
livelihood
##corp
##elial
admiring
##dened
sporadic
believer
casablanca
popcorn
##29
asha
shovel
##bek
##dice
coiled
tangible
##dez
casper
elsie
resin
tenderness
rectory
##ivision
avail
sonar
##mori
boutique
##dier
guerre
bathed
upbringing
vaulted
sandals
blessings
##naut
##utnant
1680
306
foxes
pia
corrosion
hesitantly
confederates
crystalline
footprints
shapiro
tirana
valentin
drones
45th
microscope
shipments
texted
inquisition
wry
guernsey
unauthorized
resigning
760
ripple
schubert
stu
reassure
felony
##ardo
brittle
koreans
##havan
##ives
dun
implicit
tyres
##aldi
##lth
magnolia
##ehan
##puri
##poulos
aggressively
fei
gr
familiarity
##poo
indicative
##trust
fundamentally
jimmie
overrun
395
anchors
moans
##opus
britannia
armagh
##ggle
purposely
seizing
##vao
bewildered
mundane
avoidance
cosmopolitan
geometridae
quartermaster
caf
415
chatter
engulfed
gleam
purge
##icate
juliette
jurisprudence
guerra
revisions
##bn
casimir
brew
##jm
1749
clapton
cloudy
conde
hermitage
278
simulations
torches
vincenzo
matteo
##rill
hidalgo
booming
westbound
accomplishment
tentacles
unaffected
##sius
annabelle
flopped
sloping
##litz
dreamer
interceptor
vu
##loh
consecration
copying
messaging
breaker
climates
hospitalized
1752
torino
afternoons
winfield
witnessing
##teacher
breakers
choirs
sawmill
coldly
##ege
sipping
haste
uninhabited
conical
bibliography
pamphlets
severn
edict
##oca
deux
illnesses
grips
##pl
rehearsals
sis
thinkers
tame
##keepers
1690
acacia
reformer
##osed
##rys
shuffling
##iring
##shima
eastbound
ionic
rhea
flees
littered
##oum
rocker
vomiting
groaning
champ
overwhelmingly
civilizations
paces
sloop
adoptive
##tish
skaters
##vres
aiding
mango
##joy
nikola
shriek
##ignon
pharmaceuticals
##mg
tuna
calvert
gustavo
stocked
yearbook
##urai
##mana
computed
subsp
riff
hanoi
kelvin
hamid
moors
pastures
summons
jihad
nectar
##ctors
bayou
untitled
pleasing
vastly
republics
intellect
##η
##ulio
##tou
crumbling
stylistic
sb
##ی
consolation
frequented
h₂o
walden
widows
##iens
404
##ignment
chunks
improves
288
grit
recited
##dev
snarl
sociological
##arte
##gul
inquired
##held
bruise
clube
consultancy
homogeneous
hornets
multiplication
pasta
prick
savior
##grin
##kou
##phile
yoon
##gara
grimes
vanishing
cheering
reacting
bn
distillery
##quisite
##vity
coe
dockyard
massif
##jord
escorts
voss
##valent
byte
chopped
hawke
illusions
workings
floats
##koto
##vac
kv
annapolis
madden
##onus
alvaro
noctuidae
##cum
##scopic
avenge
steamboat
forte
illustrates
erika
##trip
570
dew
nationalities
bran
manifested
thirsty
diversified
muscled
reborn
##standing
arson
##lessness
##dran
##logram
##boys
##kushima
##vious
willoughby
##phobia
286
alsace
dashboard
yuki
##chai
granville
myspace
publicized
tricked
##gang
adjective
##ater
relic
reorganisation
enthusiastically
indications
saxe
##lassified
consolidate
iec
padua
helplessly
ramps
renaming
regulars
pedestrians
accents
convicts
inaccurate
lowers
mana
##pati
barrie
bjp
outta
someplace
berwick
flanking
invoked
marrow
sparsely
excerpts
clothed
rei
##ginal
wept
##straße
##vish
alexa
excel
##ptive
membranes
aquitaine
creeks
cutler
sheppard
implementations
ns
##dur
fragrance
budge
concordia
magnesium
marcelo
##antes
gladly
vibrating
##rral
##ggles
montrose
##omba
lew
seamus
1630
cocky
##ament
##uen
bjorn
##rrick
fielder
fluttering
##lase
methyl
kimberley
mcdowell
reductions
barbed
##jic
##tonic
aeronautical
condensed
distracting
##promising
huffed
##cala
##sle
claudius
invincible
missy
pious
balthazar
ci
##lang
butte
combo
orson
##dication
myriad
1707
silenced
##fed
##rh
coco
netball
yourselves
##oza
clarify
heller
peg
durban
etudes
offender
roast
blackmail
curvature
##woods
vile
309
illicit
suriname
##linson
overture
1685
bubbling
gymnast
tucking
##mming
##ouin
maldives
##bala
gurney
##dda
##eased
##oides
backside
pinto
jars
racehorse
tending
##rdial
baronetcy
wiener
duly
##rke
barbarian
cupping
flawed
##thesis
bertha
pleistocene
puddle
swearing
##nob
##tically
fleeting
prostate
amulet
educating
##mined
##iti
##tler
75th
jens
respondents
analytics
cavaliers
papacy
raju
##iente
##ulum
##tip
funnel
271
disneyland
##lley
sociologist
##iam
2500
faulkner
louvre
menon
##dson
276
##ower
afterlife
mannheim
peptide
referees
comedians
meaningless
##anger
##laise
fabrics
hurley
renal
sleeps
##bour
##icle
breakout
kristin
roadside
animator
clover
disdain
unsafe
redesign
##urity
firth
barnsley
portage
reset
narrows
268
commandos
expansive
speechless
tubular
##lux
essendon
eyelashes
smashwords
##yad
##bang
##claim
craved
sprinted
chet
somme
astor
wrocław
orton
266
bane
##erving
##uing
mischief
##amps
##sund
scaling
terre
##xious
impairment
offenses
undermine
moi
soy
contiguous
arcadia
inuit
seam
##tops
macbeth
rebelled
##icative
##iot
590
elaborated
frs
uniformed
##dberg
259
powerless
priscilla
stimulated
980
qc
arboretum
frustrating
trieste
bullock
##nified
enriched
glistening
intern
##adia
locus
nouvelle
ollie
ike
lash
starboard
ee
tapestry
headlined
hove
rigged
##vite
pollock
##yme
thrive
clustered
cas
roi
gleamed
olympiad
##lino
pressured
regimes
##hosis
##lick
ripley
##ophone
kickoff
gallon
rockwell
##arable
crusader
glue
revolutions
scrambling
1714
grover
##jure
englishman
aztec
263
contemplating
coven
ipad
preach
triumphant
tufts
##esian
rotational
##phus
328
falkland
##brates
strewn
clarissa
rejoin
environmentally
glint
banded
drenched
moat
albanians
johor
rr
maestro
malley
nouveau
shaded
taxonomy
v6
adhere
bunk
airfields
##ritan
1741
encompass
remington
tran
##erative
amelie
mazda
friar
morals
passions
##zai
breadth
vis
##hae
argus
burnham
caressing
insider
rudd
##imov
##mini
##rso
italianate
murderous
textual
wainwright
armada
bam
weave
timer
##taken
##nh
fra
##crest
ardent
salazar
taps
tunis
##ntino
allegro
gland
philanthropic
##chester
implication
##optera
esq
judas
noticeably
wynn
##dara
inched
indexed
crises
villiers
bandit
royalties
patterned
cupboard
interspersed
accessory
isla
kendrick
entourage
stitches
##esthesia
headwaters
##ior
interlude
distraught
draught
1727
##basket
biased
sy
transient
triad
subgenus
adapting
kidd
shortstop
##umatic
dimly
spiked
mcleod
reprint
nellie
pretoria
windmill
##cek
singled
##mps
273
reunite
##orous
747
bankers
outlying
##omp
##ports
##tream
apologies
cosmetics
patsy
##deh
##ocks
##yson
bender
nantes
serene
##nad
lucha
mmm
323
##cius
##gli
cmll
coinage
nestor
juarez
##rook
smeared
sprayed
twitching
sterile
irina
embodied
juveniles
enveloped
miscellaneous
cancers
dq
gulped
luisa
crested
swat
donegal
ref
##anov
##acker
hearst
mercantile
##lika
doorbell
ua
vicki
##alla
##som
bilbao
psychologists
stryker
sw
horsemen
turkmenistan
wits
##national
anson
mathew
screenings
##umb
rihanna
##agne
##nessy
aisles
##iani
##osphere
hines
kenton
saskatoon
tasha
truncated
##champ
##itan
mildred
advises
fredrik
interpreting
inhibitors
##athi
spectroscopy
##hab
##kong
karim
panda
##oia
##nail
##vc
conqueror
kgb
leukemia
##dity
arrivals
cheered
pisa
phosphorus
shielded
##riated
mammal
unitarian
urgently
chopin
sanitary
##mission
spicy
drugged
hinges
##tort
tipping
trier
impoverished
westchester
##caster
267
epoch
nonstop
##gman
##khov
aromatic
centrally
cerro
##tively
##vio
billions
modulation
sedimentary
283
facilitating
outrageous
goldstein
##eak
##kt
ld
maitland
penultimate
pollard
##dance
fleets
spaceship
vertebrae
##nig
alcoholism
als
recital
##bham
##ference
##omics
m2
##bm
trois
##tropical
##в
commemorates
##meric
marge
##raction
1643
670
cosmetic
ravaged
##ige
catastrophe
eng
##shida
albrecht
arterial
bellamy
decor
harmon
##rde
bulbs
synchronized
vito
easiest
shetland
shielding
wnba
##glers
##ssar
##riam
brianna
cumbria
##aceous
##rard
cores
thayer
##nsk
brood
hilltop
luminous
carts
keynote
larkin
logos
##cta
##ا
##mund
##quay
lilith
tinted
277
wrestle
mobilization
##uses
sequential
siam
bloomfield
takahashi
274
##ieving
presenters
ringo
blazed
witty
##oven
##ignant
devastation
haydn
harmed
newt
therese
##peed
gershwin
molina
rabbis
sudanese
001
innate
restarted
##sack
##fus
slices
wb
##shah
enroll
hypothetical
hysterical
1743
fabio
indefinite
warped
##hg
exchanging
525
unsuitable
##sboro
gallo
1603
bret
cobalt
homemade
##hunter
mx
operatives
##dhar
terraces
durable
latch
pens
whorls
##ctuated
##eaux
billing
ligament
succumbed
##gly
regulators
spawn
##brick
##stead
filmfare
rochelle
##nzo
1725
circumstance
saber
supplements
##nsky
##tson
crowe
wellesley
carrot
##9th
##movable
primate
drury
sincerely
topical
##mad
##rao
callahan
kyiv
smarter
tits
undo
##yeh
announcements
anthologies
barrio
nebula
##islaus
##shaft
##tyn
bodyguards
2021
assassinate
barns
emmett
scully
##mah
##yd
##eland
##tino
##itarian
demoted
gorman
lashed
prized
adventist
writ
##gui
alla
invertebrates
##ausen
1641
amman
1742
align
healy
redistribution
##gf
##rize
insulation
##drop
adherents
hezbollah
vitro
ferns
yanking
269
php
registering
uppsala
cheerleading
confines
mischievous
tully
##ross
49th
docked
roam
stipulated
pumpkin
##bry
prompt
##ezer
blindly
shuddering
craftsmen
frail
scented
katharine
scramble
shaggy
sponge
helix
zaragoza
279
##52
43rd
backlash
fontaine
seizures
posse
cowan
nonfiction
telenovela
wwii
hammered
undone
##gpur
encircled
irs
##ivation
artefacts
oneself
searing
smallpox
##belle
##osaurus
shandong
breached
upland
blushing
rankin
infinitely
psyche
tolerated
docking
evicted
##col
unmarked
##lving
gnome
lettering
litres
musique
##oint
benevolent
##jal
blackened
##anna
mccall
racers
tingle
##ocene
##orestation
introductions
radically
292
##hiff
##باد
1610
1739
munchen
plead
##nka
condo
scissors
##sight
##tens
apprehension
##cey
##yin
hallmark
watering
formulas
sequels
##llas
aggravated
bae
commencing
##building
enfield
prohibits
marne
vedic
civilized
euclidean
jagger
beforehand
blasts
dumont
##arney
##nem
740
conversions
hierarchical
rios
simulator
##dya
##lellan
hedges
oleg
thrusts
shadowed
darby
maximize
1744
gregorian
##nded
##routed
sham
unspecified
##hog
emory
factual
##smo
##tp
fooled
##rger
ortega
wellness
marlon
##oton
##urance
casket
keating
ley
enclave
##ayan
char
influencing
jia
##chenko
412
ammonia
erebidae
incompatible
violins
cornered
##arat
grooves
astronauts
columbian
rampant
fabrication
kyushu
mahmud
vanish
##dern
mesopotamia
##lete
ict
##rgen
caspian
kenji
pitted
##vered
999
grimace
roanoke
tchaikovsky
twinned
##analysis
##awan
xinjiang
arias
clemson
kazakh
sizable
1662
##khand
##vard
plunge
tatum
vittorio
##nden
cholera
##dana
##oper
bracing
indifference
projectile
superliga
##chee
realises
upgrading
299
porte
retribution
##vies
nk
stil
##resses
ama
bureaucracy
blackberry
bosch
testosterone
collapses
greer
##pathic
ioc
fifties
malls
##erved
bao
baskets
adolescents
siegfried
##osity
##tosis
mantra
detecting
existent
fledgling
##cchi
dissatisfied
gan
telecommunication
mingled
sobbed
6000
controversies
outdated
taxis
##raus
fright
slams
##lham
##fect
##tten
detectors
fetal
tanned
##uw
fray
goth
olympian
skipping
mandates
scratches
sheng
unspoken
hyundai
tracey
hotspur
restrictive
##buch
americana
mundo
##bari
burroughs
diva
vulcan
##6th
distinctions
thumping
##ngen
mikey
sheds
fide
rescues
springsteen
vested
valuation
##ece
##ely
pinnacle
rake
sylvie
##edo
almond
quivering
##irus
alteration
faltered
##wad
51st
hydra
ticked
##kato
recommends
##dicated
antigua
arjun
stagecoach
wilfred
trickle
pronouns
##pon
aryan
nighttime
##anian
gall
pea
stitch
##hei
leung
milos
##dini
eritrea
nexus
starved
snowfall
kant
parasitic
cot
discus
hana
strikers
appleton
kitchens
##erina
##partisan
##itha
##vius
disclose
metis
##channel
1701
tesla
##vera
fitch
1735
blooded
##tila
decimal
##tang
##bai
cyclones
eun
bottled
peas
pensacola
basha
bolivian
crabs
boil
lanterns
partridge
roofed
1645
necks
##phila
opined
patting
##kla
##lland
chuckles
volta
whereupon
##nche
devout
euroleague
suicidal
##dee
inherently
involuntary
knitting
nasser
##hide
puppets
colourful
courageous
southend
stills
miraculous
hodgson
richer
rochdale
ethernet
greta
uniting
prism
umm
##haya
##itical
##utation
deterioration
pointe
prowess
##ropriation
lids
scranton
billings
subcontinent
##koff
##scope
brute
kellogg
psalms
degraded
##vez
stanisław
##ructured
ferreira
pun
astonishing
gunnar
##yat
arya
prc
gottfried
##tight
excursion
##ographer
dina
##quil
##nare
huffington
illustrious
wilbur
gundam
verandah
##zard
naacp
##odle
constructive
fjord
kade
##naud
generosity
thrilling
baseline
cayman
frankish
plastics
accommodations
zoological
##fting
cedric
qb
motorized
##dome
##otted
squealed
tackled
canucks
budgets
situ
asthma
dail
gabled
grasslands
whimpered
writhing
judgments
##65
minnie
pv
##carbon
bananas
grille
domes
monique
odin
maguire
markham
tierney
##estra
##chua
libel
poke
speedy
atrium
laval
notwithstanding
##edly
fai
kala
##sur
robb
##sma
listings
luz
supplementary
tianjin
##acing
enzo
jd
ric
scanner
croats
transcribed
##49
arden
cv
##hair
##raphy
##lver
##uy
357
seventies
staggering
alam
horticultural
hs
regression
timbers
blasting
##ounded
montagu
manipulating
##cit
catalytic
1550
troopers
##meo
condemnation
fitzpatrick
##oire
##roved
inexperienced
1670
castes
##lative
outing
314
dubois
flicking
quarrel
ste
learners
1625
iq
whistled
##class
282
classify
tariffs
temperament
355
folly
liszt
##yles
immersed
jordanian
ceasefire
apparel
extras
maru
fished
##bio
harta
stockport
assortment
craftsman
paralysis
transmitters
##cola
blindness
##wk
fatally
proficiency
solemnly
##orno
repairing
amore
groceries
ultraviolet
##chase
schoolhouse
##tua
resurgence
nailed
##otype
##×
ruse
saliva
diagrams
##tructing
albans
rann
thirties
1b
antennas
hilarious
cougars
paddington
stats
##eger
breakaway
ipod
reza
authorship
prohibiting
scoffed
##etz
##ttle
conscription
defected
trondheim
##fires
ivanov
keenan
##adan
##ciful
##fb
##slow
locating
##ials
##tford
cadiz
basalt
blankly
interned
rags
rattling
##tick
carpathian
reassured
sync
bum
guildford
iss
staunch
##onga
astronomers
sera
sofie
emergencies
susquehanna
##heard
duc
mastery
vh1
williamsburg
bayer
buckled
craving
##khan
##rdes
bloomington
##write
alton
barbecue
##bians
justine
##hri
##ndt
delightful
smartphone
newtown
photon
retrieval
peugeot
hissing
##monium
##orough
flavors
lighted
relaunched
tainted
##games
##lysis
anarchy
microscopic
hopping
adept
evade
evie
##beau
inhibit
sinn
adjustable
hurst
intuition
wilton
cisco
44th
lawful
lowlands
stockings
thierry
##dalen
##hila
##nai
fates
prank
tb
maison
lobbied
provocative
1724
4a
utopia
##qual
carbonate
gujarati
purcell
##rford
curtiss
##mei
overgrown
arenas
mediation
swallows
##rnik
respectful
turnbull
##hedron
##hope
alyssa
ozone
##ʻi
ami
gestapo
johansson
snooker
canteen
cuff
declines
empathy
stigma
##ags
##iner
##raine
taxpayers
gui
volga
##wright
##copic
lifespan
overcame
tattooed
enactment
giggles
##ador
##camp
barrington
bribe
obligatory
orbiting
peng
##enas
elusive
sucker
##vating
cong
hardship
empowered
anticipating
estrada
cryptic
greasy
detainees
planck
sudbury
plaid
dod
marriott
kayla
##ears
##vb
##zd
mortally
##hein
cognition
radha
319
liechtenstein
meade
richly
argyle
harpsichord
liberalism
trumpets
lauded
tyrant
salsa
tiled
lear
promoters
reused
slicing
trident
##chuk
##gami
##lka
cantor
checkpoint
##points
gaul
leger
mammalian
##tov
##aar
##schaft
doha
frenchman
nirvana
##vino
delgado
headlining
##eron
##iography
jug
tko
1649
naga
intersections
##jia
benfica
nawab
##suka
ashford
gulp
##deck
##vill
##rug
brentford
frazier
pleasures
dunne
potsdam
shenzhen
dentistry
##tec
flanagan
##dorff
##hear
chorale
dinah
prem
quezon
##rogated
relinquished
sutra
terri
##pani
flaps
##rissa
poly
##rnet
homme
aback
##eki
linger
womb
##kson
##lewood
doorstep
orthodoxy
threaded
westfield
##rval
dioceses
fridays
subsided
##gata
loyalists
##biotic
##ettes
letterman
lunatic
prelate
tenderly
invariably
souza
thug
winslow
##otide
furlongs
gogh
jeopardy
##runa
pegasus
##umble
humiliated
standalone
tagged
##roller
freshmen
klan
##bright
attaining
initiating
transatlantic
logged
viz
##uance
1723
combatants
intervening
stephane
chieftain
despised
grazed
317
cdc
galveston
godzilla
macro
simulate
##planes
parades
##esses
960
##ductive
##unes
equator
overdose
##cans
##hosh
##lifting
joshi
epstein
sonora
treacherous
aquatics
manchu
responsive
##sation
supervisory
##christ
##llins
##ibar
##balance
##uso
kimball
karlsruhe
mab
##emy
ignores
phonetic
reuters
spaghetti
820
almighty
danzig
rumbling
tombstone
designations
lured
outset
##felt
supermarkets
##wt
grupo
kei
kraft
susanna
##blood
comprehension
genealogy
##aghan
##verted
redding
##ythe
1722
bowing
##pore
##roi
lest
sharpened
fulbright
valkyrie
sikhs
##unds
swans
bouquet
merritt
##tage
##venting
commuted
redhead
clerks
leasing
cesare
dea
hazy
##vances
fledged
greenfield
servicemen
##gical
armando
blackout
dt
sagged
downloadable
intra
potion
pods
##4th
##mism
xp
attendants
gambia
stale
##ntine
plump
asteroids
rediscovered
buds
flea
hive
##neas
1737
classifications
debuts
##eles
olympus
scala
##eurs
##gno
##mute
hummed
sigismund
visuals
wiggled
await
pilasters
clench
sulfate
##ances
bellevue
enigma
trainee
snort
##sw
clouded
denim
##rank
##rder
churning
hartman
lodges
riches
sima
##missible
accountable
socrates
regulates
mueller
##cr
1702
avoids
solids
himalayas
nutrient
pup
##jevic
squat
fades
nec
##lates
##pina
##rona
##ου
privateer
tequila
##gative
##mpton
apt
hornet
immortals
##dou
asturias
cleansing
dario
##rries
##anta
etymology
servicing
zhejiang
##venor
##nx
horned
erasmus
rayon
relocating
£10
##bags
escalated
promenade
stubble
2010s
artisans
axial
liquids
mora
sho
yoo
##tsky
bundles
oldies
##nally
notification
bastion
##ths
sparkle
##lved
1728
leash
pathogen
highs
##hmi
immature
880
gonzaga
ignatius
mansions
monterrey
sweets
bryson
##loe
polled
regatta
brightest
pei
rosy
squid
hatfield
payroll
addict
meath
cornerback
heaviest
lodging
##mage
capcom
rippled
##sily
barnet
mayhem
ymca
snuggled
rousseau
##cute
blanchard
284
fragmented
leighton
chromosomes
risking
##md
##strel
##utter
corinne
coyotes
cynical
hiroshi
yeomanry
##ractive
ebook
grading
mandela
plume
agustin
magdalene
##rkin
bea
femme
trafford
##coll
##lun
##tance
52nd
fourier
upton
##mental
camilla
gust
iihf
islamabad
longevity
##kala
feldman
netting
##rization
endeavour
foraging
mfa
orr
##open
greyish
contradiction
graz
##ruff
handicapped
marlene
tweed
oaxaca
spp
campos
miocene
pri
configured
cooks
pluto
cozy
pornographic
##entes
70th
fairness
glided
jonny
lynne
rounding
sired
##emon
##nist
remade
uncover
##mack
complied
lei
newsweek
##jured
##parts
##enting
##pg
293
finer
guerrillas
athenian
deng
disused
stepmother
accuse
gingerly
seduction
521
confronting
##walker
##going
gora
nostalgia
sabres
virginity
wrenched
##minated
syndication
wielding
eyre
##56
##gnon
##igny
behaved
taxpayer
sweeps
##growth
childless
gallant
##ywood
amplified
geraldine
scrape
##ffi
babylonian
fresco
##rdan
##kney
##position
1718
restricting
tack
fukuoka
osborn
selector
partnering
##dlow
318
gnu
kia
tak
whitley
gables
##54
##mania
mri
softness
immersion
##bots
##evsky
1713
chilling
insignificant
pcs
##uis
elites
lina
purported
supplemental
teaming
##americana
##dding
##inton
proficient
rouen
##nage
##rret
niccolo
selects
##bread
fluffy
1621
gruff
knotted
mukherjee
polgara
thrash
nicholls
secluded
smoothing
thru
corsica
loaf
whitaker
inquiries
##rrier
##kam
indochina
289
marlins
myles
peking
##tea
extracts
pastry
superhuman
connacht
vogel
##ditional
##het
##udged
##lash
gloss
quarries
refit
teaser
##alic
##gaon
20s
materialized
sling
camped
pickering
tung
tracker
pursuant
##cide
cranes
soc
##cini
##typical
##viere
anhalt
overboard
workout
chores
fares
orphaned
stains
##logie
fenton
surpassing
joyah
triggers
##itte
grandmaster
##lass
##lists
clapping
fraudulent
ledger
nagasaki
##cor
##nosis
##tsa
eucalyptus
tun
##icio
##rney
##tara
dax
heroism
ina
wrexham
onboard
unsigned
##dates
moshe
galley
winnie
droplets
exiles
praises
watered
noodles
##aia
fein
adi
leland
multicultural
stink
bingo
comets
erskine
modernized
canned
constraint
domestically
chemotherapy
featherweight
stifled
##mum
darkly
irresistible
refreshing
hasty
isolate
##oys
kitchener
planners
##wehr
cages
yarn
implant
toulon
elects
childbirth
yue
##lind
##lone
cn
rightful
sportsman
junctions
remodeled
specifies
##rgh
291
##oons
complimented
##urgent
lister
ot
##logic
bequeathed
cheekbones
fontana
gabby
##dial
amadeus
corrugated
maverick
resented
triangles
##hered
##usly
nazareth
tyrol
1675
assent
poorer
sectional
aegean
##cous
296
nylon
ghanaian
##egorical
##weig
cushions
forbid
fusiliers
obstruction
somerville
##scia
dime
earrings
elliptical
leyte
oder
polymers
timmy
atm
midtown
piloted
settles
continual
externally
mayfield
##uh
enrichment
henson
keane
persians
1733
benji
braden
pep
324
##efe
contenders
pepsi
valet
##isches
298
##asse
##earing
goofy
stroll
##amen
authoritarian
occurrences
adversary
ahmedabad
tangent
toppled
dorchester
1672
modernism
marxism
islamist
charlemagne
exponential
racks
unicode
brunette
mbc
pic
skirmish
##bund
##lad
##powered
##yst
hoisted
messina
shatter
##ctum
jedi
vantage
##music
##neil
clemens
mahmoud
corrupted
authentication
lowry
nils
##washed
omnibus
wounding
jillian
##itors
##opped
serialized
narcotics
handheld
##arm
##plicity
intersecting
stimulating
##onis
crate
fellowships
hemingway
casinos
climatic
fordham
copeland
drip
beatty
leaflets
robber
brothel
madeira
##hedral
sphinx
ultrasound
##vana
valor
forbade
leonid
villas
##aldo
duane
marquez
##cytes
disadvantaged
forearms
kawasaki
reacts
consular
lax
uncles
uphold
##hopper
concepcion
dorsey
lass
##izan
arching
passageway
1708
researches
tia
internationals
##graphs
##opers
distinguishes
javanese
divert
##uven
plotted
##listic
##rwin
##erik
##tify
affirmative
signifies
validation
##bson
kari
felicity
georgina
zulu
##eros
##rained
##rath
overcoming
##dot
argyll
##rbin
1734
chiba
ratification
windy
earls
parapet
##marks
hunan
pristine
astrid
punta
##gart
brodie
##kota
##oder
malaga
minerva
rouse
##phonic
bellowed
pagoda
portals
reclamation
##gur
##odies
##⁄₄
parentheses
quoting
allergic
palette
showcases
benefactor
heartland
nonlinear
##tness
bladed
cheerfully
scans
##ety
##hone
1666
girlfriends
pedersen
hiram
sous
##liche
##nator
1683
##nery
##orio
##umen
bobo
primaries
smiley
##cb
unearthed
uniformly
fis
metadata
1635
ind
##oted
recoil
##titles
##tura
##ια
406
hilbert
jamestown
mcmillan
tulane
seychelles
##frid
antics
coli
fated
stucco
##grants
1654
bulky
accolades
arrays
caledonian
carnage
optimism
puebla
##tative
##cave
enforcing
rotherham
seo
dunlop
aeronautics
chimed
incline
zoning
archduke
hellenistic
##oses
##sions
candi
thong
##ople
magnate
rustic
##rsk
projective
slant
##offs
danes
hollis
vocalists
##ammed
congenital
contend
gesellschaft
##ocating
##pressive
douglass
quieter
##cm
##kshi
howled
salim
spontaneously
townsville
buena
southport
##bold
kato
1638
faerie
stiffly
##vus
##rled
297
flawless
realising
taboo
##7th
bytes
straightening
356
jena
##hid
##rmin
cartwright
berber
bertram
soloists
411
noses
417
coping
fission
hardin
inca
##cen
1717
mobilized
vhf
##raf
biscuits
curate
##85
##anial
331
gaunt
neighbourhoods
1540
##abas
blanca
bypassed
sockets
behold
coincidentally
##bane
nara
shave
splinter
terrific
##arion
##erian
commonplace
juris
redwood
waistband
boxed
caitlin
fingerprints
jennie
naturalized
##ired
balfour
craters
jody
bungalow
hugely
quilt
glitter
pigeons
undertaker
bulging
constrained
goo
##sil
##akh
assimilation
reworked
##person
persuasion
##pants
felicia
##cliff
##ulent
1732
explodes
##dun
##inium
##zic
lyman
vulture
hog
overlook
begs
northwards
ow
spoil
##urer
fatima
favorably
accumulate
sargent
sorority
corresponded
dispersal
kochi
toned
##imi
##lita
internacional
newfound
##agger
##lynn
##rigue
booths
peanuts
##eborg
medicare
muriel
nur
##uram
crates
millennia
pajamas
worsened
##breakers
jimi
vanuatu
yawned
##udeau
carousel
##hony
hurdle
##ccus
##mounted
##pod
rv
##eche
airship
ambiguity
compulsion
recapture
##claiming
arthritis
##osomal
1667
asserting
ngc
sniffing
dade
discontent
glendale
ported
##amina
defamation
rammed
##scent
fling
livingstone
##fleet
875
##ppy
apocalyptic
comrade
lcd
##lowe
cessna
eine
persecuted
subsistence
demi
hoop
reliefs
710
coptic
progressing
stemmed
perpetrators
1665
priestess
##nio
dobson
ebony
rooster
itf
tortricidae
##bbon
##jian
cleanup
##jean
##øy
1721
eighties
taxonomic
holiness
##hearted
##spar
antilles
showcasing
stabilized
##nb
gia
mascara
michelangelo
dawned
##uria
##vinsky
extinguished
fitz
grotesque
£100
##fera
##loid
##mous
barges
neue
throbbed
cipher
johnnie
##a1
##mpt
outburst
##swick
spearheaded
administrations
c1
heartbreak
pixels
pleasantly
##enay
lombardy
plush
##nsed
bobbie
##hly
reapers
tremor
xiang
minogue
substantive
hitch
barak
##wyl
kwan
##encia
910
obscene
elegance
indus
surfer
bribery
conserve
##hyllum
##masters
horatio
##fat
apes
rebound
psychotic
##pour
iteration
##mium
##vani
botanic
horribly
antiques
dispose
paxton
##hli
##wg
timeless
1704
disregard
engraver
hounds
##bau
##version
looted
uno
facilitates
groans
masjid
rutland
antibody
disqualification
decatur
footballers
quake
slacks
48th
rein
scribe
stabilize
commits
exemplary
tho
##hort
##chison
pantry
traversed
##hiti
disrepair
identifiable
vibrated
baccalaureate
##nnis
csa
interviewing
##iensis
##raße
greaves
wealthiest
343
classed
jogged
£5
##58
##atal
illuminating
knicks
respecting
##uno
scrubbed
##iji
##dles
kruger
moods
growls
raider
silvia
chefs
kam
vr
cree
percival
##terol
gunter
counterattack
defiant
henan
ze
##rasia
##riety
equivalence
submissions
##fra
##thor
bautista
mechanically
##heater
cornice
herbal
templar
##mering
outputs
ruining
ligand
renumbered
extravagant
mika
blockbuster
eta
insurrection
##ilia
darkening
ferocious
pianos
strife
kinship
##aer
melee
##anor
##iste
##may
##oue
decidedly
weep
##jad
##missive
##ppel
354
puget
unease
##gnant
1629
hammering
kassel
ob
wessex
##lga
bromwich
egan
paranoia
utilization
##atable
##idad
contradictory
provoke
##ols
##ouring
##tangled
knesset
##very
##lette
plumbing
##sden
##¹
greensboro
occult
sniff
338
zev
beaming
gamer
haggard
mahal
##olt
##pins
mendes
utmost
briefing
gunnery
##gut
##pher
##zh
##rok
1679
khalifa
sonya
##boot
principals
urbana
wiring
##liffe
##minating
##rrado
dahl
nyu
skepticism
np
townspeople
ithaca
lobster
somethin
##fur
##arina
##−1
freighter
zimmerman
biceps
contractual
##herton
amend
hurrying
subconscious
##anal
336
meng
clermont
spawning
##eia
##lub
dignitaries
impetus
snacks
spotting
twigs
##bilis
##cz
##ouk
libertadores
nic
skylar
##aina
##firm
gustave
asean
##anum
dieter
legislatures
flirt
bromley
trolls
umar
##bbies
##tyle
blah
parc
bridgeport
crank
negligence
##nction
46th
constantin
molded
bandages
seriousness
00pm
siegel
carpets
compartments
upbeat
statehood
##dner
##edging
marko
730
platt
##hane
paving
##iy
1738
abbess
impatience
limousine
nbl
##talk
441
lucille
mojo
nightfall
robbers
##nais
karel
brisk
calves
replicate
ascribed
telescopes
##olf
intimidated
##reen
ballast
specialization
##sit
aerodynamic
caliphate
rainer
visionary
##arded
epsilon
##aday
##onte
aggregation
auditory
boosted
reunification
kathmandu
loco
robyn
402
acknowledges
appointing
humanoid
newell
redeveloped
restraints
##tained
barbarians
chopper
1609
italiana
##lez
##lho
investigates
wrestlemania
##anies
##bib
690
##falls
creaked
dragoons
gravely
minions
stupidity
volley
##harat
##week
musik
##eries
##uously
fungal
massimo
semantics
malvern
##ahl
##pee
discourage
embryo
imperialism
1910s
profoundly
##ddled
jiangsu
sparkled
stat
##holz
sweatshirt
tobin
##iction
sneered
##cheon
##oit
brit
causal
smyth
##neuve
diffuse
perrin
silvio
##ipes
##recht
detonated
iqbal
selma
##nism
##zumi
roasted
##riders
tay
##ados
##mament
##mut
##rud
840
completes
nipples
cfa
flavour
hirsch
##laus
calderon
sneakers
moravian
##ksha
1622
rq
294
##imeters
bodo
##isance
##pre
##ronia
anatomical
excerpt
##lke
dh
kunst
##tablished
##scoe
biomass
panted
unharmed
gael
housemates
montpellier
##59
coa
rodents
tonic
hickory
singleton
##taro
451
1719
aldo
breaststroke
dempsey
och
rocco
##cuit
merton
dissemination
midsummer
serials
##idi
haji
polynomials
##rdon
gs
enoch
prematurely
shutter
taunton
£3
##grating
##inates
archangel
harassed
##asco
326
archway
dazzling
##ecin
1736
sumo
wat
##kovich
1086
honneur
##ently
##nostic
##ttal
##idon
1605
403
1716
blogger
rents
##gnan
hires
##ikh
##dant
howie
##rons
handler
retracted
shocks
1632
arun
duluth
kepler
trumpeter
##lary
peeking
seasoned
trooper
##mara
laszlo
##iciencies
##rti
heterosexual
##inatory
##ssion
indira
jogging
##inga
##lism
beit
dissatisfaction
malice
##ately
nedra
peeling
##rgeon
47th
stadiums
475
vertigo
##ains
iced
restroom
##plify
##tub
illustrating
pear
##chner
##sibility
inorganic
rappers
receipts
watery
##kura
lucinda
##oulos
reintroduced
##8th
##tched
gracefully
saxons
nutritional
wastewater
rained
favourites
bedrock
fisted
hallways
likeness
upscale
##lateral
1580
blinds
prequel
##pps
##tama
deter
humiliating
restraining
tn
vents
1659
laundering
recess
rosary
tractors
coulter
federer
##ifiers
##plin
persistence
##quitable
geschichte
pendulum
quakers
##beam
bassett
pictorial
buffet
koln
##sitor
drills
reciprocal
shooters
##57
##cton
##tees
converge
pip
dmitri
donnelly
yamamoto
aqua
azores
demographics
hypnotic
spitfire
suspend
wryly
roderick
##rran
sebastien
##asurable
mavericks
##fles
##200
himalayan
prodigy
##iance
transvaal
demonstrators
handcuffs
dodged
mcnamara
sublime
1726
crazed
##efined
##till
ivo
pondered
reconciled
shrill
sava
##duk
bal
cad
heresy
jaipur
goran
##nished
341
lux
shelly
whitehall
##hre
israelis
peacekeeping
##wled
1703
demetrius
ousted
##arians
##zos
beale
anwar
backstroke
raged
shrinking
cremated
##yck
benign
towing
wadi
darmstadt
landfill
parana
soothe
colleen
sidewalks
mayfair
tumble
hepatitis
ferrer
superstructure
##gingly
##urse
##wee
anthropological
translators
##mies
closeness
hooves
##pw
mondays
##roll
##vita
landscaping
##urized
purification
sock
thorns
thwarted
jalan
tiberius
##taka
saline
##rito
confidently
khyber
sculptors
##ij
brahms
hammersmith
inspectors
battista
fivb
fragmentation
hackney
##uls
arresting
exercising
antoinette
bedfordshire
##zily
dyed
##hema
1656
racetrack
variability
##tique
1655
austrians
deteriorating
madman
theorists
aix
lehman
weathered
1731
decreed
eruptions
1729
flaw
quinlan
sorbonne
flutes
nunez
1711
adored
downwards
fable
rasped
1712
moritz
mouthful
renegade
shivers
stunts
dysfunction
restrain
translit
327
pancakes
##avio
##cision
##tray
351
vial
##lden
bain
##maid
##oxide
chihuahua
malacca
vimes
##rba
##rnier
1664
donnie
plaques
##ually
337
bangs
floppy
huntsville
loretta
nikolay
##otte
eater
handgun
ubiquitous
##hett
eras
zodiac
1634
##omorphic
1820s
##zog
cochran
##bula
##lithic
warring
##rada
dalai
excused
blazers
mcconnell
reeling
bot
este
##abi
geese
hoax
taxon
##bla
guitarists
##icon
condemning
hunts
inversion
moffat
taekwondo
##lvis
1624
stammered
##rest
##rzy
sousa
fundraiser
marylebone
navigable
uptown
cabbage
daniela
salman
shitty
whimper
##kian
##utive
programmers
protections
rm
##rmi
##rued
forceful
##enes
fuss
##tao
##wash
brat
oppressive
reykjavik
spartak
ticking
##inkles
##kiewicz
adolph
horst
maui
protege
straighten
cpc
landau
concourse
clements
resultant
##ando
imaginative
joo
reactivated
##rem
##ffled
##uising
consultative
##guide
flop
kaitlyn
mergers
parenting
somber
##vron
supervise
vidhan
##imum
courtship
exemplified
harmonies
medallist
refining
##rrow
##ка
amara
##hum
780
goalscorer
sited
overshadowed
rohan
displeasure
secretive
multiplied
osman
##orth
engravings
padre
##kali
##veda
miniatures
mis
##yala
clap
pali
rook
##cana
1692
57th
antennae
astro
oskar
1628
bulldog
crotch
hackett
yucatan
##sure
amplifiers
brno
ferrara
migrating
##gree
thanking
turing
##eza
mccann
ting
andersson
onslaught
gaines
ganga
incense
standardization
##mation
sentai
scuba
stuffing
turquoise
waivers
alloys
##vitt
regaining
vaults
##clops
##gizing
digger
furry
memorabilia
probing
##iad
payton
rec
deutschland
filippo
opaque
seamen
zenith
afrikaans
##filtration
disciplined
inspirational
##merie
banco
confuse
grafton
tod
##dgets
championed
simi
anomaly
biplane
##ceptive
electrode
##para
1697
cleavage
crossbow
swirl
informant
##lars
##osta
afi
bonfire
spec
##oux
lakeside
slump
##culus
##lais
##qvist
##rrigan
1016
facades
borg
inwardly
cervical
xl
pointedly
050
stabilization
##odon
chests
1699
hacked
ctv
orthogonal
suzy
##lastic
gaulle
jacobite
rearview
##cam
##erted
ashby
##drik
##igate
##mise
##zbek
affectionately
canine
disperse
latham
##istles
##ivar
spielberg
##orin
##idium
ezekiel
cid
##sg
durga
middletown
##cina
customized
frontiers
harden
##etano
##zzy
1604
bolsheviks
##66
coloration
yoko
##bedo
briefs
slabs
debra
liquidation
plumage
##oin
blossoms
dementia
subsidy
1611
proctor
relational
jerseys
parochial
ter
##ici
esa
peshawar
cavalier
loren
cpi
idiots
shamrock
1646
dutton
malabar
mustache
##endez
##ocytes
referencing
terminates
marche
yarmouth
##sop
acton
mated
seton
subtly
baptised
beige
extremes
jolted
kristina
telecast
##actic
safeguard
waldo
##baldi
##bular
endeavors
sloppy
subterranean
##ensburg
##itung
delicately
pigment
tq
##scu
1626
##ound
collisions
coveted
herds
##personal
##meister
##nberger
chopra
##ricting
abnormalities
defective
galician
lucie
##dilly
alligator
likened
##genase
burundi
clears
complexion
derelict
deafening
diablo
fingered
champaign
dogg
enlist
isotope
labeling
mrna
##erre
brilliance
marvelous
##ayo
1652
crawley
ether
footed
dwellers
deserts
hamish
rubs
warlock
skimmed
##lizer
870
buick
embark
heraldic
irregularities
##ajan
kiara
##kulam
##ieg
antigen
kowalski
##lge
oakley
visitation
##mbit
vt
##suit
1570
murderers
##miento
##rites
chimneys
##sling
condemn
custer
exchequer
havre
##ghi
fluctuations
##rations
dfb
hendricks
vaccines
##tarian
nietzsche
biking
juicy
##duced
brooding
scrolling
selangor
##ragan
352
annum
boomed
seminole
sugarcane
##dna
departmental
dismissing
innsbruck
arteries
ashok
batavia
daze
kun
overtook
##rga
##tlan
beheaded
gaddafi
holm
electronically
faulty
galilee
fractures
kobayashi
##lized
gunmen
magma
aramaic
mala
eastenders
inference
messengers
bf
##qu
407
bathrooms
##vere
1658
flashbacks
ideally
misunderstood
##jali
##weather
mendez
##grounds
505
uncanny
##iii
1709
friendships
##nbc
sacrament
accommodated
reiterated
logistical
pebbles
thumped
##escence
administering
decrees
drafts
##flight
##cased
##tula
futuristic
picket
intimidation
winthrop
##fahan
interfered
339
afar
francoise
morally
uta
cochin
croft
dwarfs
##bruck
##dents
##nami
biker
##hner
##meral
nano
##isen
##ometric
##pres
##ан
brightened
meek
parcels
securely
gunners
##jhl
##zko
agile
hysteria
##lten
##rcus
bukit
champs
chevy
cuckoo
leith
sadler
theologians
welded
##section
1663
jj
plurality
xander
##rooms
##formed
shredded
temps
intimately
pau
tormented
##lok
##stellar
1618
charred
ems
essen
##mmel
alarms
spraying
ascot
blooms
twinkle
##abia
##apes
internment
obsidian
##chaft
snoop
##dav
##ooping
malibu
##tension
quiver
##itia
hays
mcintosh
travers
walsall
##ffie
1623
beverley
schwarz
plunging
structurally
m3
rosenthal
vikram
##tsk
770
ghz
##onda
##tiv
chalmers
groningen
pew
reckon
unicef
##rvis
55th
##gni
1651
sulawesi
avila
cai
metaphysical
screwing
turbulence
##mberg
augusto
samba
56th
baffled
momentary
toxin
##urian
##wani
aachen
condoms
dali
steppe
##3d
##app
##oed
##year
adolescence
dauphin
electrically
inaccessible
microscopy
nikita
##ega
atv
##cel
##enter
##oles
##oteric
##ы
accountants
punishments
wrongly
bribes
adventurous
clinch
flinders
southland
##hem
##kata
gough
##ciency
lads
soared
##ה
undergoes
deformation
outlawed
rubbish
##arus
##mussen
##nidae
##rzburg
arcs
##ingdon
##tituted
1695
wheelbase
wheeling
bombardier
campground
zebra
##lices
##oj
##bain
lullaby
##ecure
donetsk
wylie
grenada
##arding
##ης
squinting
eireann
opposes
##andra
maximal
runes
##broken
##cuting
##iface
##ror
##rosis
additive
britney
adultery
triggering
##drome
detrimental
aarhus
containment
jc
swapped
vichy
##ioms
madly
##oric
##rag
brant
##ckey
##trix
1560
1612
broughton
rustling
##stems
##uder
asbestos
mentoring
##nivorous
finley
leaps
##isan
apical
pry
slits
substitutes
##dict
intuitive
fantasia
insistent
unreasonable
##igen
##vna
domed
hannover
margot
ponder
##zziness
impromptu
jian
lc
rampage
stemming
##eft
andrey
gerais
whichever
amnesia
appropriated
anzac
clicks
modifying
ultimatum
cambrian
maids
verve
yellowstone
##mbs
conservatoire
##scribe
adherence
dinners
spectra
imperfect
mysteriously
sidekick
tatar
tuba
##aks
##ifolia
distrust
##athan
##zle
c2
ronin
zac
##pse
celaena
instrumentalist
scents
skopje
##mbling
comical
compensated
vidal
condor
intersect
jingle
wavelengths
##urrent
mcqueen
##izzly
carp
weasel
422
kanye
militias
postdoctoral
eugen
gunslinger
##ɛ
faux
hospice
##for
appalled
derivation
dwarves
##elis
dilapidated
##folk
astoria
philology
##lwyn
##otho
##saka
inducing
philanthropy
##bf
##itative
geek
markedly
sql
##yce
bessie
indices
rn
##flict
495
frowns
resolving
weightlifting
tugs
cleric
contentious
1653
mania
rms
##miya
##reate
##ruck
##tucket
bien
eels
marek
##ayton
##cence
discreet
unofficially
##ife
leaks
##bber
1705
332
dung
compressor
hillsborough
pandit
shillings
distal
##skin
381
##tat
##you
nosed
##nir
mangrove
undeveloped
##idia
textures
##inho
##500
##rise
ae
irritating
nay
amazingly
bancroft
apologetic
compassionate
kata
symphonies
##lovic
airspace
##lch
930
gifford
precautions
fulfillment
sevilla
vulgar
martinique
##urities
looting
piccolo
tidy
##dermott
quadrant
armchair
incomes
mathematicians
stampede
nilsson
##inking
##scan
foo
quarterfinal
##ostal
shang
shouldered
squirrels
##owe
344
vinegar
##bner
##rchy
##systems
delaying
##trics
ars
dwyer
rhapsody
sponsoring
##gration
bipolar
cinder
starters
##olio
##urst
421
signage
##nty
aground
figurative
mons
acquaintances
duets
erroneously
soyuz
elliptic
recreated
##cultural
##quette
##ssed
##tma
##zcz
moderator
scares
##itaire
##stones
##udence
juniper
sighting
##just
##nsen
britten
calabria
ry
bop
cramer
forsyth
stillness
##л
airmen
gathers
unfit
##umber
##upt
taunting
##rip
seeker
streamlined
##bution
holster
schumann
tread
vox
##gano
##onzo
strive
dil
reforming
covent
newbury
predicting
##orro
decorate
tre
##puted
andover
ie
asahi
dept
dunkirk
gills
##tori
buren
huskies
##stis
##stov
abstracts
bets
loosen
##opa
1682
yearning
##glio
##sir
berman
effortlessly
enamel
napoli
persist
##peration
##uez
attache
elisa
b1
invitations
##kic
accelerating
reindeer
boardwalk
clutches
nelly
polka
starbucks
##kei
adamant
huey
lough
unbroken
adventurer
embroidery
inspecting
stanza
##ducted
naia
taluka
##pone
##roids
chases
deprivation
florian
##jing
##ppet
earthly
##lib
##ssee
colossal
foreigner
vet
freaks
patrice
rosewood
triassic
upstate
##pkins
dominates
ata
chants
ks
vo
##400
##bley
##raya
##rmed
555
agra
infiltrate
##ailing
##ilation
##tzer
##uppe
##werk
binoculars
enthusiast
fujian
squeak
##avs
abolitionist
almeida
boredom
hampstead
marsden
rations
##ands
inflated
334
bonuses
rosalie
patna
##rco
329
detachments
penitentiary
54th
flourishing
woolf
##dion
##etched
papyrus
##lster
##nsor
##toy
bobbed
dismounted
endelle
inhuman
motorola
tbs
wince
wreath
##ticus
hideout
inspections
sanjay
disgrace
infused
pudding
stalks
##urbed
arsenic
leases
##hyl
##rrard
collarbone
##waite
##wil
dowry
##bant
##edance
genealogical
nitrate
salamanca
scandals
thyroid
necessitated
##!
##"
###
##$
##%
##&
##'
##(
##)
##*
##+
##,
##-
##.
##/
##:
##;
##<
##=
##>
##?
##@
##[
##\
##]
##^
##_
##`
##{
##|
##}
##~
##¡
##¢
##£
##¤
##¥
##¦
##§
##¨
##©
##ª
##«
##¬
##®
##±
##´
##µ
##¶
##·
##º
##»
##¼
##¾
##¿
##æ
##ð
##÷
##þ
##đ
##ħ
##ŋ
##œ
##ƒ
##ɐ
##ɑ
##ɒ
##ɔ
##ɕ
##ə
##ɡ
##ɣ
##ɨ
##ɪ
##ɫ
##ɬ
##ɯ
##ɲ
##ɴ
##ɹ
##ɾ
##ʀ
##ʁ
##ʂ
##ʃ
##ʉ
##ʊ
##ʋ
##ʌ
##ʎ
##ʐ
##ʑ
##ʒ
##ʔ
##ʰ
##ʲ
##ʳ
##ʷ
##ʸ
##ʻ
##ʼ
##ʾ
##ʿ
##ˈ
##ˡ
##ˢ
##ˣ
##ˤ
##β
##γ
##δ
##ε
##ζ
##θ
##κ
##λ
##μ
##ξ
##ο
##π
##ρ
##σ
##τ
##υ
##φ
##χ
##ψ
##ω
##б
##г
##д
##ж
##з
##м
##п
##с
##у
##ф
##х
##ц
##ч
##ш
##щ
##ъ
##э
##ю
##ђ
##є
##і
##ј
##љ
##њ
##ћ
##ӏ
##ա
##բ
##գ
##դ
##ե
##թ
##ի
##լ
##կ
##հ
##մ
##յ
##ն
##ո
##պ
##ս
##վ
##տ
##ր
##ւ
##ք
##־
##א
##ב
##ג
##ד
##ו
##ז
##ח
##ט
##י
##ך
##כ
##ל
##ם
##מ
##ן
##נ
##ס
##ע
##ף
##פ
##ץ
##צ
##ק
##ר
##ש
##ת
##،
##ء
##ب
##ت
##ث
##ج
##ح
##خ
##ذ
##ز
##س
##ش
##ص
##ض
##ط
##ظ
##ع
##غ
##ـ
##ف
##ق
##ك
##و
##ى
##ٹ
##پ
##چ
##ک
##گ
##ں
##ھ
##ہ
##ے
##अ
##आ
##उ
##ए
##क
##ख
##ग
##च
##ज
##ट
##ड
##ण
##त
##थ
##द
##ध
##न
##प
##ब
##भ
##म
##य
##र
##ल
##व
##श
##ष
##स
##ह
##ा
##ि
##ी
##ो
##।
##॥
##ং
##অ
##আ
##ই
##উ
##এ
##ও
##ক
##খ
##গ
##চ
##ছ
##জ
##ট
##ড
##ণ
##ত
##থ
##দ
##ধ
##ন
##প
##ব
##ভ
##ম
##য
##র
##ল
##শ
##ষ
##স
##হ
##া
##ি
##ী
##ে
##க
##ச
##ட
##த
##ந
##ன
##ப
##ம
##ய
##ர
##ல
##ள
##வ
##ா
##ி
##ு
##ே
##ை
##ನ
##ರ
##ಾ
##ක
##ය
##ර
##ල
##ව
##ා
##ก
##ง
##ต
##ท
##น
##พ
##ม
##ย
##ร
##ล
##ว
##ส
##อ
##า
##เ
##་
##།
##ག
##ང
##ད
##ན
##པ
##བ
##མ
##འ
##ར
##ལ
##ས
##မ
##ა
##ბ
##გ
##დ
##ე
##ვ
##თ
##ი
##კ
##ლ
##მ
##ნ
##ო
##რ
##ს
##ტ
##უ
##ᄀ
##ᄂ
##ᄃ
##ᄅ
##ᄆ
##ᄇ
##ᄉ
##ᄊ
##ᄋ
##ᄌ
##ᄎ
##ᄏ
##ᄐ
##ᄑ
##ᄒ
##ᅡ
##ᅢ
##ᅥ
##ᅦ
##ᅧ
##ᅩ
##ᅪ
##ᅭ
##ᅮ
##ᅯ
##ᅲ
##ᅳ
##ᅴ
##ᅵ
##ᆨ
##ᆫ
##ᆯ
##ᆷ
##ᆸ
##ᆼ
##ᴬ
##ᴮ
##ᴰ
##ᴵ
##ᴺ
##ᵀ
##ᵃ
##ᵇ
##ᵈ
##ᵉ
##ᵍ
##ᵏ
##ᵐ
##ᵒ
##ᵖ
##ᵗ
##ᵘ
##ᵣ
##ᵤ
##ᵥ
##ᶜ
##ᶠ
##‐
##‑
##‒
##–
##—
##―
##‖
##‘
##’
##‚
##“
##”
##„
##†
##‡
##•
##…
##‰
##′
##″
##›
##‿
##⁄
##⁰
##ⁱ
##⁴
##⁵
##⁶
##⁷
##⁸
##⁹
##⁻
##ⁿ
##₅
##₆
##₇
##₈
##₉
##₊
##₍
##₎
##ₐ
##ₑ
##ₒ
##ₓ
##ₕ
##ₖ
##ₗ
##ₘ
##ₚ
##ₛ
##ₜ
##₤
##₩
##€
##₱
##₹
##ℓ
##№
##ℝ
##™
##⅓
##⅔
##←
##↑
##→
##↓
##↔
##↦
##⇄
##⇌
##⇒
##∂
##∅
##∆
##∇
##∈
##∗
##∘
##√
##∞
##∧
##∨
##∩
##∪
##≈
##≡
##≤
##≥
##⊂
##⊆
##⊕
##⊗
##⋅
##─
##│
##■
##▪
##●
##★
##☆
##☉
##♠
##♣
##♥
##♦
##♯
##⟨
##⟩
##ⱼ
##⺩
##⺼
##⽥
##、
##。
##〈
##〉
##《
##》
##「
##」
##『
##』
##〜
##あ
##い
##う
##え
##お
##か
##き
##く
##け
##こ
##さ
##し
##す
##せ
##そ
##た
##ち
##っ
##つ
##て
##と
##な
##に
##ぬ
##ね
##の
##は
##ひ
##ふ
##へ
##ほ
##ま
##み
##む
##め
##も
##や
##ゆ
##よ
##ら
##り
##る
##れ
##ろ
##を
##ん
##ァ
##ア
##ィ
##イ
##ウ
##ェ
##エ
##オ
##カ
##キ
##ク
##ケ
##コ
##サ
##シ
##ス
##セ
##タ
##チ
##ッ
##ツ
##テ
##ト
##ナ
##ニ
##ノ
##ハ
##ヒ
##フ
##ヘ
##ホ
##マ
##ミ
##ム
##メ
##モ
##ャ
##ュ
##ョ
##ラ
##リ
##ル
##レ
##ロ
##ワ
##ン
##・
##ー
##一
##三
##上
##下
##不
##世
##中
##主
##久
##之
##也
##事
##二
##五
##井
##京
##人
##亻
##仁
##介
##代
##仮
##伊
##会
##佐
##侍
##保
##信
##健
##元
##光
##八
##公
##内
##出
##分
##前
##劉
##力
##加
##勝
##北
##区
##十
##千
##南
##博
##原
##口
##古
##史
##司
##合
##吉
##同
##名
##和
##囗
##四
##国
##國
##土
##地
##坂
##城
##堂
##場
##士
##夏
##外
##大
##天
##太
##夫
##奈
##女
##子
##学
##宀
##宇
##安
##宗
##定
##宣
##宮
##家
##宿
##寺
##將
##小
##尚
##山
##岡
##島
##崎
##川
##州
##巿
##帝
##平
##年
##幸
##广
##弘
##張
##彳
##後
##御
##德
##心
##忄
##志
##忠
##愛
##成
##我
##戦
##戸
##手
##扌
##政
##文
##新
##方
##日
##明
##星
##春
##昭
##智
##曲
##書
##月
##有
##朝
##木
##本
##李
##村
##東
##松
##林
##森
##楊
##樹
##橋
##歌
##止
##正
##武
##比
##氏
##民
##水
##氵
##氷
##永
##江
##沢
##河
##治
##法
##海
##清
##漢
##瀬
##火
##版
##犬
##王
##生
##田
##男
##疒
##発
##白
##的
##皇
##目
##相
##省
##真
##石
##示
##社
##神
##福
##禾
##秀
##秋
##空
##立
##章
##竹
##糹
##美
##義
##耳
##良
##艹
##花
##英
##華
##葉
##藤
##行
##街
##西
##見
##訁
##語
##谷
##貝
##貴
##車
##軍
##辶
##道
##郎
##郡
##部
##都
##里
##野
##金
##鈴
##镇
##長
##門
##間
##阝
##阿
##陳
##陽
##雄
##青
##面
##風
##食
##香
##馬
##高
##龍
##龸
##fi
##fl
##!
##(
##)
##,
##-
##.
##/
##:
##?
##~
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | faster_rcnn_pnas_feature_extractor_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_pnas_feature_extractor."""
import tensorflow as tf
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_pnas.FasterRCNNPNASFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 4320])
def test_extract_proposal_features_input_size_224(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 14, 14, 4320])
def test_extract_proposal_features_input_size_112(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 4320])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 9, 9, 4320])
def test_filter_scaling_computation(self):
expected_filter_scaling = {
((4, 8), 2): 1.0,
((4, 8), 7): 2.0,
((4, 8), 8): 2.0,
((4, 8), 9): 4.0
}
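# Reading of the table above: the filter count is doubled once for every
# reduction cell whose index falls strictly below start_cell_num.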
for args, filter_scaling in expected_filter_scaling.items():
reduction_indices, start_cell_num = args
self.assertAlmostEqual(
frcnn_pnas._filter_scaling(reduction_indices, start_cell_num),
filter_scaling)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/layers | layers | __init__ | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import torch
from .batch_norm import FrozenBatchNorm2d
from .misc import Conv2d
from .misc import ConvTranspose2d
from .misc import interpolate
from .misc import nhwc_to_nchw_transform, nchw_to_nhwc_transform
from .nms import nms
from .roi_align import ROIAlign
from .roi_align import roi_align
from .roi_pool import ROIPool
from .roi_pool import roi_pool
from .smooth_l1_loss import smooth_l1_loss
__all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool",
"smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate",
"FrozenBatchNorm2d", "nhwc_to_nchw_transform", "nchw_to_nhwc_transform"
]
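# Usage note (sketch): downstream code imports these ops through the package,
# e.g. `from maskrcnn_benchmark.layers import nms, roi_align`.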
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/training/AMP | AMP | train_benchmark_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
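# With horovodrun -np 8 and --train_batch_size 160 per GPU below, the effective
# global batch size is 8 * 160 = 1280 images per step.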
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b4_cfg.py \
--mode train_and_eval \
--use_amp \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 2 \
--save_checkpoint_freq 5 \
--train_batch_size 160 \
--eval_batch_size 160 \
--train_img_size 380 \
--eval_img_size 380 \
--augmenter_name autoaugment \
--lr_decay cosine \
--mixup_alpha 0.2 \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
TensorFlow2/LanguageModeling/BERT/scripts | scripts | run_pretraining_lamb | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_phase1=${1:-60}
train_batch_size_phase2=${2:-10}
eval_batch_size=${3:-8}
learning_rate_phase1=${4:-"7.5e-4"}
learning_rate_phase2=${5:-"5e-4"}
precision=${6:-"fp16"}
use_xla=${7:-"true"}
num_gpus=${8:-8}
warmup_steps_phase1=${9:-"2133"}
warmup_steps_phase2=${10:-"213"}
train_steps=${11:-8341}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-128}
num_accumulation_steps_phase2=${14:-384}
bert_model=${15:-"large"}
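# Example invocation using the defaults above (all arguments are positional):
# bash scripts/run_pretraining_lamb.sh 60 10 8 7.5e-4 5e-4 fp16 true 8 2133 213 8341 100 128 384 large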
DATA_DIR=data
export DATA_DIR=$DATA_DIR
GBS1=$(expr $train_batch_size_phase1 \* $num_gpus \* $num_accumulation_steps_phase1)
GBS2=$(expr $train_batch_size_phase2 \* $num_gpus \* $num_accumulation_steps_phase2)
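# With the defaults this yields GBS1 = 60 * 8 * 128 = 61440 and GBS2 = 10 * 8 * 384 = 30720.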
printf -v TAG "tf_bert_pretraining_lamb_%s_%s_gbs1%d_gbs2%d" "$bert_model" "$precision" $GBS1 $GBS2
DATESTAMP=`date +'%y%m%d%H%M%S'`
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=${RESULTS_DIR:-/results/${TAG}_${DATESTAMP}}
LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log
mkdir -m 777 -p $RESULTS_DIR
printf "Saving checkpoints to %s\n" "$RESULTS_DIR"
printf "Logs written to %s\n" "$LOGFILE"
export RESULTS_DIR=$RESULTS_DIR
printf -v SCRIPT_ARGS "%d %d %d %e %e %s %s %d %d %d %d %d %d %d %s %s" \
$train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 \
$learning_rate_phase2 "$precision" "$use_xla" $num_gpus $warmup_steps_phase1 \
$warmup_steps_phase2 $train_steps $save_checkpoints_steps \
$num_accumulation_steps_phase1 $num_accumulation_steps_phase2 "$bert_model"
set -x
# RUN PHASE 1
bash scripts/run_pretraining_lamb_phase1.sh $SCRIPT_ARGS |& tee -a $LOGFILE
# RUN PHASE 2
bash scripts/run_pretraining_lamb_phase2.sh $SCRIPT_ARGS |& tee -a $LOGFILE
set +x
|
PyTorch/Detection/Efficientdet | Efficientdet | distributed_train | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NUM_PROC=$1
shift
mkdir ./EFFICIENTDET_DGX1_perf-train_AMP_NGPU8_BS-30
declare -a CMD
if [ -n "${SLURM_LOCALID-}" ]; then
# Mode 1: Slurm launched a task for each GPU and set some envvars; no need for parallel launch
if [ "${SLURM_NTASKS}" -gt "${SLURM_JOB_NUM_NODES}" ]; then
CMD=( './bind.sh' '--cpu=exclusive' '--' 'python' '-u' )
else
CMD=( 'python' '-u' )
fi
else
# Mode 2: Single-node Docker; need to launch tasks with Pytorch's distributed launch
CMD=( 'python' '-u' '-m' 'bind_launch' "--nproc_per_node=${NUM_PROC}" )
fi
"${CMD[@]}" train.py "$@"
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | s3dg | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for Gated Separable 3D network (S3D-G).
The network architecture is proposed by:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import i3d_utils
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
inception_block_v1_3d = i3d_utils.inception_block_v1_3d
# Originally, arg_scope = slim.arg_scope and layers = slim; now switch to the more
# up-to-date tf.contrib.* API.
arg_scope = tf.contrib.framework.arg_scope
layers = tf.contrib.layers
def s3dg_arg_scope(weight_decay=1e-7,
batch_norm_decay=0.999,
batch_norm_epsilon=0.001):
"""Defines default arg_scope for S3D-G.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
sc: An arg_scope to use for the models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# Turns off fused batch norm.
'fused': False,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': ['moving_vars'],
'moving_variance': ['moving_vars'],
}
}
with arg_scope(
[layers.conv3d, conv3d_spatiotemporal],
weights_regularizer=layers.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([conv3d_spatiotemporal], separable=True) as sc:
return sc
def self_gating(input_tensor, scope, data_format='NDHWC'):
"""Feature gating as used in S3D-G.
Transforms the input features by aggregating features from all
spatial and temporal locations, and applying gating conditioned
on the aggregated features. More details can be found at:
https://arxiv.org/abs/1712.04851
Args:
input_tensor: A 5-D float tensor of size [batch_size, num_frames,
height, width, channels].
scope: scope for `variable_scope`.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
Returns:
A tensor with the same shape as input_tensor.
"""
index_c = data_format.index('C')
index_d = data_format.index('D')
index_h = data_format.index('H')
index_w = data_format.index('W')
input_shape = input_tensor.get_shape().as_list()
t = input_shape[index_d]
w = input_shape[index_w]
h = input_shape[index_h]
num_channels = input_shape[index_c]
spatiotemporal_average = layers.avg_pool3d(
input_tensor, [t, w, h],
stride=1,
data_format=data_format,
scope=scope + '/self_gating/avg_pool3d')
weights = layers.conv3d(
spatiotemporal_average,
num_channels, [1, 1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
data_format=data_format,
weights_initializer=trunc_normal(0.01),
scope=scope + '/self_gating/transformer_W')
tile_multiples = [1, t, w, h]
tile_multiples.insert(index_c, 1)
weights = tf.tile(weights, tile_multiples)
weights = tf.nn.sigmoid(weights)
return tf.multiply(weights, input_tensor)
def s3dg_base(inputs,
first_temporal_kernel_size=3,
temporal_conv_startat='Conv2d_2c_3x3',
gating_startat='Conv2d_2c_3x3',
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
data_format='NDHWC',
scope='InceptionV1'):
"""Defines the I3D/S3DG base architecture.
Note that we use the names as defined in Inception V1 to facilitate checkpoint
conversion from an image-trained Inception V1 checkpoint to an I3D checkpoint.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
first_temporal_kernel_size: Specifies the temporal kernel size for the first
conv3d filter. A larger value slows down the model but provides little
accuracy improvement. The default is 7 in the original I3D and S3D-G but 3
gives better performance. Must be set to one of 1, 3, 5 or 7.
temporal_conv_startat: Specifies the first conv block to use 3D or separable
3D convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs. Note that
'Conv2d_1a_7x7' cannot be made into a separable 3D conv, but can be made
into a 2D or 3D conv using the `first_temporal_kernel_size` option.
gating_startat: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating. If provided
block name is not present, all valid blocks will use self gating.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values, or
if depth_multiplier <= 0.
"""
assert data_format in ['NDHWC', 'NCDHW']
end_points = {}
t = 1
# For inverted pyramid models, we start with gating switched off.
use_gating = False
self_gating_fn = None
def gating_fn(inputs, scope):
return self_gating(inputs, scope, data_format=data_format)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with arg_scope([layers.conv3d], weights_initializer=trunc_normal(0.01)):
with arg_scope(
[layers.conv3d, layers.max_pool3d, conv3d_spatiotemporal],
stride=1,
data_format=data_format,
padding='SAME'):
# batch_size x 32 x 112 x 112 x 64
end_point = 'Conv2d_1a_7x7'
if first_temporal_kernel_size not in [1, 3, 5, 7]:
raise ValueError(
'first_temporal_kernel_size can only be 1, 3, 5 or 7.')
# Separable conv is slow when used at the first conv layer.
net = conv3d_spatiotemporal(
inputs,
depth(64), [first_temporal_kernel_size, 7, 7],
stride=2,
separable=False,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'MaxPool_2a_3x3'
net = layers.max_pool3d(
net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = layers.conv3d(net, depth(64), [1, 1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 56 x 56 x 192
end_point = 'Conv2d_2c_3x3'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = conv3d_spatiotemporal(net, depth(192), [t, 3, 3], scope=end_point)
if use_gating:
net = self_gating(net, scope=end_point, data_format=data_format)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 192
end_point = 'MaxPool_3a_3x3'
net = layers.max_pool3d(
net, [1, 3, 3], stride=[1, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 32 x 28 x 28 x 256
end_point = 'Mixed_3b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(64),
num_outputs_1_0a=depth(96),
num_outputs_1_0b=depth(128),
num_outputs_2_0a=depth(16),
num_outputs_2_0b=depth(32),
num_outputs_3_0b=depth(32),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(128),
num_outputs_1_0a=depth(128),
num_outputs_1_0b=depth(192),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(96),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_4a_3x3'
net = layers.max_pool3d(
net, [3, 3, 3], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(192),
num_outputs_1_0a=depth(96),
num_outputs_1_0b=depth(208),
num_outputs_2_0a=depth(16),
num_outputs_2_0b=depth(48),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(160),
num_outputs_1_0a=depth(112),
num_outputs_1_0b=depth(224),
num_outputs_2_0a=depth(24),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 512
end_point = 'Mixed_4d'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(128),
num_outputs_1_0a=depth(128),
num_outputs_1_0b=depth(256),
num_outputs_2_0a=depth(24),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 528
end_point = 'Mixed_4e'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(112),
num_outputs_1_0a=depth(144),
num_outputs_1_0b=depth(288),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(64),
num_outputs_3_0b=depth(64),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 16 x 14 x 14 x 832
end_point = 'Mixed_4f'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(256),
num_outputs_1_0a=depth(160),
num_outputs_1_0b=depth(320),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_5a_2x2'
net = layers.max_pool3d(
net, [2, 2, 2], stride=[2, 2, 2], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 8 x 7 x 7 x 832
end_point = 'Mixed_5b'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(256),
num_outputs_1_0a=depth(160),
num_outputs_1_0b=depth(320),
num_outputs_2_0a=depth(32),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
# batch_size x 8 x 7 x 7 x 1024
end_point = 'Mixed_5c'
if temporal_conv_startat == end_point:
t = 3
if gating_startat == end_point:
use_gating = True
self_gating_fn = gating_fn
net = inception_block_v1_3d(
net,
num_outputs_0_0a=depth(384),
num_outputs_1_0a=depth(192),
num_outputs_1_0b=depth(384),
num_outputs_2_0a=depth(48),
num_outputs_2_0b=depth(128),
num_outputs_3_0b=depth(128),
temporal_kernel_size=t,
self_gating_fn=self_gating_fn,
data_format=data_format,
scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def s3dg(inputs,
num_classes=1000,
first_temporal_kernel_size=3,
temporal_conv_startat='Conv2d_2c_3x3',
gating_startat='Conv2d_2c_3x3',
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
dropout_keep_prob=0.8,
is_training=True,
prediction_fn=layers.softmax,
spatial_squeeze=True,
reuse=None,
data_format='NDHWC',
scope='InceptionV1'):
"""Defines the S3D-G architecture.
The default image size used to train this network is 224x224.
Args:
inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
channels].
num_classes: number of predicted classes.
first_temporal_kernel_size: Specifies the temporal kernel size for the first
conv3d filter. A larger value slows down the model but provides little
accuracy improvement. Must be set to one of 1, 3, 5 or 7.
temporal_conv_startat: Specifies the first conv block to use separable 3D
convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
first valid block to use separable 3D convs. If provided block name is
not present, all valid blocks will use separable 3D convs.
gating_startat: Specifies the first conv block to use self gating.
'Conv2d_2c_3x3' is the first valid block to use self gating. If provided
block name is not present, all valid blocks will use self gating.
final_endpoint: Specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
dropout_keep_prob: the percentage of activation values that are retained.
is_training: whether the model is being trained or not.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
assert data_format in ['NDHWC', 'NCDHW']
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers.batch_norm, layers.dropout], is_training=is_training):
net, end_points = s3dg_base(
inputs,
first_temporal_kernel_size=first_temporal_kernel_size,
temporal_conv_startat=temporal_conv_startat,
gating_startat=gating_startat,
final_endpoint=final_endpoint,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
data_format=data_format,
scope=scope)
with tf.variable_scope('Logits'):
if data_format.startswith('NC'):
net = tf.transpose(net, [0, 2, 3, 4, 1])
kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
net = layers.avg_pool3d(
net,
kernel_size,
stride=1,
data_format='NDHWC',
scope='AvgPool_0a_7x7')
net = layers.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.conv3d(
net,
num_classes, [1, 1, 1],
activation_fn=None,
normalizer_fn=None,
data_format='NDHWC',
scope='Conv2d_0c_1x1')
# Temporal average pooling.
logits = tf.reduce_mean(logits, axis=1)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
s3dg.default_image_size = 224
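# Illustrative usage sketch (TF1-style; the 64-frame clip and num_classes=400 are
# example values only):
# clip = tf.placeholder(tf.float32, [2, 64, 224, 224, 3])
# with arg_scope(s3dg_arg_scope()):
# logits, end_points = s3dg(clip, num_classes=400, is_training=False)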
|
Tools/PyTorch/TimeSeriesPredictionPlatform/inference | inference | inference | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Dict, List, Optional, Tuple
import dllogger
import hydra
import numpy as np
import torch
from apex import amp
from omegaconf import OmegaConf
import conf.conf_utils
from loggers.log_helper import setup_logger
from data.data_utils import Preprocessor
def run_inference(config):
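# Keys consumed from the override config below: checkpoint, device, precision
# ("fp16"/"fp32"), and optionally evaluator, dataset_dir, dataset_path and
# preproc_state_path.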
cfg = config
with open(os.path.join(cfg.checkpoint, ".hydra/config.yaml"), "rb") as f:
config = OmegaConf.load(f)
if cfg.get("evaluator", None) is not None:
config.evaluator.config = OmegaConf.merge(config.evaluator.config, cfg.evaluator.config)
if cfg.get("dataset_dir", None):
if not os.path.isdir(cfg.dataset_dir):
raise ValueError("dataset_dir must be a directory")
config.dataset.config.dest_path = cfg.dataset_dir
config.evaluator.config.device = cfg.device
if cfg.get("dataset_path", None):
preprocessor = Preprocessor(config.dataset.config)
if cfg.get("preproc_state_path", None):
preprocessor_state_file = cfg.preproc_state_path
else:
preprocessor_state_file = None
preprocessor.load_state(preprocessor_state_file)
test_df = preprocessor.preprocess_test(dataset=cfg.dataset_path)
test_df = preprocessor.apply_scalers(test_df)
test_df = preprocessor.impute(test_df)
train, valid, test = hydra.utils.call(config.dataset, input_df=test_df)
else:
train, valid, test = hydra.utils.call(config.dataset)
del train, valid
evaluator = hydra.utils.instantiate(config.evaluator, test_data=test)
model = hydra.utils.instantiate(config.model)
if not (config.dataset.config.get('xgb', False) or config.dataset.config.get('stat', False)):
state_dict = torch.load(os.path.join(cfg.checkpoint, "best_checkpoint.zip"))['model_state_dict']
model.load_state_dict(state_dict)
device = torch.device(cfg.device) # maybe change depending on evaluator
model.to(device=device)
precision = cfg.precision
assert precision in ["fp16", "fp32"], "Precision needs to be either fp32 or fp16"
if precision == "fp16":
model = amp.initialize(model, opt_level="O2")
else:
model.load(cfg.checkpoint)
preds_full, labels_full, ids_full, weights_full = evaluator.predict(model)
eval_metrics = evaluator.evaluate(preds_full, labels_full, ids_full, weights_full)
logger = setup_logger(cfg)
logger.log(step=[], data={k: float(v) for k, v in eval_metrics.items()}, verbosity=dllogger.Verbosity.VERBOSE)
logger.log(step='event', data={"String": "Evaluation Metrics: {}".format(eval_metrics)}, verbosity=dllogger.Verbosity.DEFAULT)
return eval_metrics
|
TensorFlow2/Segmentation/nnUNet/models | models | nn_unet | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
from runtime.utils import get_config_file, get_tta_flips, is_main_process
from skimage.transform import resize
from models.sliding_window import get_importance_kernel, sliding_window_inference
from models.unet import UNet
class NNUnet(tf.keras.Model):
def __init__(self, args, loaded_model=None):
super(NNUnet, self).__init__()
self.args = args
in_channels, n_class, kernels, strides, self.patch_size = self.get_unet_params(self.args)
self.n_class = n_class
input_shape = (None, None, None, in_channels)
if self.args.dim == 3:
input_shape = (None,) + input_shape
if loaded_model is not None:
input_dtype = tf.float16 if args.amp else tf.float32
@tf.function
def wrapped_model(inputs, *args, **kwargs):
return loaded_model(tf.cast(inputs, dtype=input_dtype), *args, **kwargs)
self.model = wrapped_model
else:
if not self.args.xla and self.args.norm == "instance":
self.args.norm = "atex_instance"
self.model = UNet(
input_shape=input_shape,
n_class=n_class,
kernels=kernels,
strides=strides,
dimension=self.args.dim,
normalization_layer=self.args.norm,
negative_slope=self.args.negative_slope,
deep_supervision=self.args.deep_supervision,
)
if is_main_process():
print(f"Filters: {self.model.filters},\nKernels: {kernels}\nStrides: {strides}")
self.tta_flips = get_tta_flips(self.args.dim)
if self.args.dim == 3:
self.predictor = self.sw_inference
elif self.args.benchmark:
self.predictor = self.call
else:
self.predictor = self.call_2d
if args.dim == 3:
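            # Importance map that weights voxels inside each patch when overlapping
            # sliding-window predictions are blended during 3D inference.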
importance_kernel = get_importance_kernel(self.patch_size, args.blend_mode, 0.125)
self.importance_map = tf.tile(
tf.reshape(importance_kernel, shape=[1, *self.patch_size, 1]),
multiples=[1, 1, 1, 1, n_class],
)
@tf.function
def call(self, *args, **kwargs):
return self.model(*args, **kwargs)
@tf.function(reduce_retracing=True)
def call_2d(self, *args, **kwargs):
return self.model(*args, **kwargs)
@tf.function
def compute_loss(self, loss_fn, label, preds):
if self.args.deep_supervision:
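            # Deep supervision: auxiliary predictions from coarser decoder levels are
            # upsampled back to the label resolution and added with geometrically
            # decaying weights (0.5 ** level); c_norm rescales the weighted sum.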
upsample_layer = tf.keras.layers.UpSampling3D if self.args.dim == 3 else tf.keras.layers.UpSampling2D
loss = loss_fn(label, preds[0])
upsample_factor = np.ones(self.args.dim, dtype=np.uint8)
for i, pred in enumerate(preds[1:]):
upsample_factor = upsample_factor * self.model.strides[i + 1]
upsampled_pred = upsample_layer(upsample_factor)(pred)
loss += 0.5 ** (i + 1) * loss_fn(label, upsampled_pred)
c_norm = 1 / (2 - 2 ** (-len(preds)))
return c_norm * loss
return loss_fn(label, preds)
def sw_inference(self, img, **kwargs):
return sliding_window_inference(
inputs=img,
roi_size=self.patch_size,
model=self.model,
overlap=self.args.overlap,
n_class=self.n_class,
importance_map=self.importance_map,
**kwargs,
)
def inference(self, img):
pred = self.predictor(img, training=False)
if self.args.tta:
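            # Test-time augmentation: predict on each flipped copy of the input,
            # flip the prediction back, and average over all variants.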
for flip_axes in self.tta_flips:
flipped_img = tf.reverse(img, axis=flip_axes)
flipped_pred = self.predictor(flipped_img, training=False)
pred = pred + tf.reverse(flipped_pred, axis=flip_axes)
pred = pred / (len(self.tta_flips) + 1)
return pred
@staticmethod
def get_unet_params(args):
config = get_config_file(args)
patch_size, spacings = config["patch_size"], config["spacings"]
strides, kernels, sizes = [], [], patch_size[:]
while True:
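            # Downsample (stride 2) an axis only while its spacing stays close to the
            # finest spacing (ratio <= 2) and its size stays >= 8; such axes also get a
            # 3-voxel kernel while the remaining axes get 1. Stop after 5 stages.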
spacing_ratio = [spacing / min(spacings) for spacing in spacings]
stride = [2 if ratio <= 2 and size >= 8 else 1 for (ratio, size) in zip(spacing_ratio, sizes)]
kernel = [3 if ratio <= 2 else 1 for ratio in spacing_ratio]
if all(s == 1 for s in stride):
break
sizes = [i / j for i, j in zip(sizes, stride)]
spacings = [i * j for i, j in zip(spacings, stride)]
kernels.append(kernel)
strides.append(stride)
if len(strides) == 5:
break
strides.insert(0, len(spacings) * [1])
kernels.append(len(spacings) * [3])
return config["in_channels"], config["n_class"], kernels, strides, patch_size
@staticmethod
def layout_2d(x):
if x is None:
return None
batch_size, depth, height, width, channels = x.shape
return tf.reshape(x, (batch_size * depth, height, width, channels))
def adjust_batch(self, features, labels):
if self.args.dim == 2:
features, labels = self.layout_2d(features), self.layout_2d(labels)
return features, labels
def save_pred(self, pred, meta, idx, data_module, save_dir):
meta = meta[0].numpy()
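        # meta rows 0/1 hold the per-axis crop bounds (min/max); meta[2] is the
        # original volume shape used to restore the prediction to its pre-crop size.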
original_shape = meta[2]
min_d, max_d = meta[0, 0], meta[1, 0]
min_h, max_h = meta[0, 1], meta[1, 1]
min_w, max_w = meta[0, 2], meta[1, 2]
if len(pred.shape) == 5 and pred.shape[0] == 1:
pred = tf.squeeze(pred, 0)
if not all(original_shape == pred.shape[:-1]):
paddings = [
[min_d, original_shape[0] - max_d],
[min_h, original_shape[1] - max_h],
[min_w, original_shape[2] - max_w],
[0, 0],
]
final_pred = tf.pad(pred, paddings=paddings)
else:
final_pred = pred
final_pred = tf.nn.softmax(final_pred, axis=-1)
final_pred = final_pred.numpy()
final_pred = np.moveaxis(final_pred, -1, 0)
if not all(original_shape == final_pred.shape[1:]):
class_ = final_pred.shape[0]
resized_pred = np.zeros((class_, *original_shape))
for i in range(class_):
resized_pred[i] = resize(
final_pred[i], original_shape, order=3, mode="edge", cval=0, clip=True, anti_aliasing=False
)
final_pred = resized_pred
fname = data_module.test_fname(idx)
output_fname = os.path.basename(fname).replace("_x", "")
np.save(os.path.join(save_dir, output_fname), final_pred, allow_pickle=False)
|
PaddlePaddle/Classification/RN50v1.5/scripts/inference | inference | infer_resnet50_TF32 | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python inference.py \
--trt-inference-dir ./inference_tf32 \
--trt-precision FP32 \
--dali-num-threads 8 \
--batch-size 256 \
--benchmark-steps 1024 \
--benchmark-warmup-steps 16 \
--trt-use-synthetic True
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | multiproc | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
import subprocess
import torch
def main():
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in argslist:
argslist[argslist.index('--world-size') + 1] = str(world_size)
else:
argslist.append('--world-size')
argslist.append(str(world_size))
workers = []
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank') + 1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
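        # Only rank 0 keeps its stdout; output from the remaining workers is discarded.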
stdout = None if i == 0 else subprocess.DEVNULL
worker = subprocess.Popen(
[str(sys.executable)] + argslist, stdout=stdout)
workers.append(worker)
returncode = 0
try:
        # Track workers that have not yet finished so an already-finished worker
        # is not counted more than once.
        pending = set(workers)
        while pending:
            for worker in list(pending):
                try:
                    worker_returncode = worker.wait(1)
                except subprocess.TimeoutExpired:
                    continue
                pending.discard(worker)
                if worker_returncode != 0:
                    if returncode != 1:
                        for other in workers:
                            other.terminate()
                    returncode = 1
except KeyboardInterrupt:
print('Pressed CTRL-C, TERMINATING')
for worker in workers:
worker.terminate()
for worker in workers:
worker.wait()
raise
sys.exit(returncode)
if __name__ == "__main__":
main()
|
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/AMP | AMP | convergence_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v2/s_cfg.py \
--mode train_and_eval \
--use_amp \
--use_xla \
--model_dir ./output/ \
--data_dir /data/ \
--log_steps 500 \
--save_checkpoint_freq 10 \
--n_stages 4 \
--max_epochs 350 \
--train_batch_size 460 \
--train_img_size 300 \
--base_img_size 128 \
--lr_decay cosine \
--lr_init 0.005 \
--weight_decay .000005 \
--opt_epsilon 0.001 \
--moving_average_decay 0.9999 \
--eval_img_size 384 \
--eval_batch_size 100 \
--augmenter_name randaugment \
--raug_num_layers 2 \
--raug_magnitude 15 \
--cutmix_alpha 0 \
--mixup_alpha 0 \
--defer_img_mixing
|
TensorFlow/Detection/SSD/configs | configs | ssd320_full_4gpus | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal
# loss (a.k.a Retinanet).
# See Lin et al, https://arxiv.org/abs/1708.02002
# Trained on COCO, initialized from Imagenet classification checkpoint
model {
ssd {
inplace_batchnorm_update: true
freeze_batchnorm: true
num_classes: 90
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
encode_background_as_zeros: true
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: [1.0, 2.0, 0.5]
scales_per_octave: 2
}
}
image_resizer {
fixed_shape_resizer {
height: 320
width: 320
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
depth: 256
class_prediction_bias_init: -4.6
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
random_normal_initializer {
stddev: 0.01
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
num_layers_before_predictor: 4
kernel_size: 3
}
}
feature_extractor {
type: 'ssd_resnet50_v1_fpn'
fpn {
min_level: 3
max_level: 7
}
min_depth: 16
depth_multiplier: 1.0
conv_hyperparams {
activation: RELU_6,
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
batch_norm {
scale: true,
decay: 0.997,
epsilon: 0.001,
}
}
override_base_feature_extractor_hyperparams: true
}
loss {
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 2.0
}
}
localization_loss {
weighted_smooth_l1 {
}
}
classification_weight: 1.0
localization_weight: 1.0
}
normalize_loss_by_num_matches: true
normalize_loc_loss_by_codesize: true
post_processing {
batch_non_max_suppression {
score_threshold: 1e-8
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
}
}
train_config: {
fine_tune_checkpoint: "/checkpoints/resnet_v1_50/model.ckpt"
fine_tune_checkpoint_type: "classification"
batch_size: 32
sync_replicas: true
startup_delay_steps: 0
replicas_to_aggregate: 8
num_steps: 25000
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_object_covered: 0.0
min_aspect_ratio: 0.75
max_aspect_ratio: 3.0
min_area: 0.75
max_area: 1.0
overlap_thresh: 0.0
}
}
optimizer {
momentum_optimizer: {
learning_rate: {
cosine_decay_learning_rate {
learning_rate_base: .08000000000000000000
total_steps: 25000
warmup_learning_rate: .03466560000000000000
warmup_steps: 2000
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
}
train_input_reader: {
tf_record_input_reader {
input_path: "/data/coco2017_tfrecords/*train*"
}
label_map_path: "object_detection/data/mscoco_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
num_examples: 8000
}
eval_input_reader: {
tf_record_input_reader {
input_path: "/data/coco2017_tfrecords/*val*"
}
label_map_path: "object_detection/data/mscoco_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
TensorFlow2/Detection/Efficientdet/scripts/D0 | D0 | evaluate-AMP-8xV100-32G | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=64
ema=0.9999
mkdir -p /tmp/evaluate-AMP-8xV100-32G
mpirun -np 8 --allow-run-as-root --bind-to none \
-map-by slot -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-x CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
python3 eval.py \
--val_file_pattern=/workspace/coco/val-* \
--val_json_file=/workspace/coco/annotations/instances_val2017.json \
--ckpt_path=${CKPT:-/checkpoints/emackpt-300} \
--batch_size=$bs \
--amp=True \
--hparams="moving_average_decay=$ema" \
2>&1 | tee /tmp/evaluate-AMP-8xV100-32G/eval.log |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/bin | bin | build_denoiser | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cudaUtils.h"
#include "denoiserBuilder.h"
#include "engineCache.h"
#include "jsonModelImporter.h"
#include "logging.h"
#include "NvInfer.h"
#include <iostream>
#include <memory>
using namespace nvinfer1;
using namespace tts;
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
bool matches(const std::string& arg, const std::string& flag)
{
return arg.length() >= flag.length() && arg.substr(0, flag.length()) == flag;
}
int parseNumFlag(
const int argc, const char** argv, const std::string& flag, int* i)
{
int value;
const std::string arg(argv[*i]);
if (arg.length() > flag.length()) {
value = std::stol(arg.substr(flag.length()));
} else if (*i + 1 < argc) {
++(*i);
value = std::stol(argv[*i]);
} else {
throw std::runtime_error("Missing argument for '" + flag + "'.");
}
return value;
}
int parseAmpFlag(
const int argc, const char** argv, const std::string& flag, int* i)
{
std::string str;
const std::string arg(argv[*i]);
if (arg.length() > flag.length()) {
str = arg.substr(flag.length());
} else if (*i + 1 < argc) {
++(*i);
str = argv[*i];
} else {
throw std::runtime_error("Missing argument for '" + flag + "'.");
}
int value;
if (str == "fp32") {
value = 0;
} else if (str == "amp") {
value = 1;
} else {
throw std::runtime_error(
"Invalid argument for precision (amp|fp32): " + str);
}
return value;
}
void usage(const std::string& binName)
{
std::cerr << "usage: " << std::endl;
std::cerr << " " << binName << " <model file> <engine file> [options]\n";
std::cerr << "options:" << std::endl;
std::cerr << " -B<batch size>" << std::endl;
std::cerr << " -F<precision (fp32|amp)>" << std::endl;
std::cerr << " -h" << std::endl;
}
void parseArgs(
const int argc,
const char** const argv,
std::string* model,
std::string* enginePath,
int* batchSize,
int* useAMP)
{
bool modelSet = false;
bool enginePathSet = false;
for (int i = 1; i < argc; ++i) {
const std::string arg(argv[i]);
if (matches(arg, "-B")) {
*batchSize = parseNumFlag(argc, argv, "-B", &i);
} else if (matches(arg, "-F")) {
*useAMP = parseAmpFlag(argc, argv, "-F", &i);
} else if (matches(arg, "-h")) {
usage(argv[0]);
exit(0);
} else {
if (!modelSet) {
*model = arg;
modelSet = true;
} else if (!enginePathSet) {
*enginePath = arg;
enginePathSet = true;
} else {
throw std::runtime_error("Unknown extra argument '" + arg + "'.");
}
}
}
}
/******************************************************************************
* MAIN ***********************************************************************
*****************************************************************************/
int main(int argc, const char* argv[])
{
std::string denoiserModelPath;
std::string enginePath;
int batchSize = 1;
int useFP16 = true;
parseArgs(argc, argv, &denoiserModelPath, &enginePath, &batchSize, &useFP16);
if (denoiserModelPath.empty() || enginePath.empty()) {
usage(argv[0]);
return 1;
}
CudaUtils::printDeviceInformation();
try {
std::shared_ptr<Logger> logger(new Logger(ILogger::Severity::kERROR));
TRTPtr<IBuilder> builder(createInferBuilder(*logger));
builder->setMaxBatchSize(batchSize);
TRTPtr<IBuilderConfig> config(builder->createBuilderConfig());
config->setMaxWorkspaceSize(1ULL << 30);
uint32_t flags = 0;
if (useFP16) {
flags |= (1U << static_cast<int>(BuilderFlag::kFP16));
}
config->setFlags(flags);
EngineCache cache(logger);
JSONModelImporter importer(denoiserModelPath);
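    // The denoiser operates on windows of 2 << 13 = 16384 audio samples.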
const int denoiserWindowSize = 2 << 13;
DenoiserBuilder denoiserBuilder(denoiserWindowSize);
const TRTPtr<ICudaEngine> engine
= denoiserBuilder.build(importer, *builder, batchSize, useFP16);
cache.save(*engine, enginePath);
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
|
PyTorch/Recommendation/DLRM/dlrm/cuda_src | cuda_src | pytorch_embedding_ops | #include <torch/extension.h>
torch::Tensor gatherGPUFusedFwdTorch(torch::Tensor embedding,
torch::Tensor indices,
torch::Tensor offsets,
bool amp_train);
torch::Tensor gatherGPUFusedBwdTorch(torch::Tensor embedding,
torch::Tensor indices,
torch::Tensor offsets,
torch::Tensor upstreamGrad);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("gather_gpu_fused_fwd", &gatherGPUFusedFwdTorch, "", py::arg("embedding"),
py::arg("indices"),
py::arg("offsets"),
py::arg("amp_train"));
m.def("gather_gpu_fused_bwd", &gatherGPUFusedBwdTorch, "", py::arg("embedding"),
py::arg("indices"),
py::arg("offsets"),
py::arg("upstreamGrad"));
}
|
PyTorch/Detection/SSD/examples | examples | inference | #!/usr/bin/env python
# coding: utf-8
# # Inference on pretrained SSD model using Tensor Cores
# In this tutorial we will show how to run inference with our SSD implementation.
#
# We will start by defining the input pipeline, then we will load the model, and finally we will run inference.
# ## Loading an image
# Let's import the libraries we will use to prepare an input image.
# In[1]:
import numpy as np
from matplotlib import pyplot as plt
import torch
get_ipython().run_line_magic('matplotlib', 'inline')
# From our examples we can import utility functions for inference:
# In[2]:
from dle.inference import load_image, rescale, crop_center, normalize
# Now, we can load an example image.
# In[3]:
img = load_image('http://images.cocodataset.org/val2017/000000397133.jpg')
plt.imshow(img)
# Next we will rescale, crop, and normalize it, so the model gets the expected input:
# In[4]:
img = rescale(img, 300, 300)
img = crop_center(img, 300, 300)
img = normalize(img)
# We can present the image:
# In[5]:
plt.imshow(img)
# It looks weird because, after normalization, the data values are in the [-1..1] range, while the plotting library expects values in the [0..1] range. We can fix this for visualization purposes:
# In[6]:
out = img/2+0.5
plt.imshow(out)
img.shape
# ## Building a predictor
# We have prepared our input. The next step is to load an SSD model.
# In our examples you can find some framework-specific functions. Some of them are explained here in detail.
# In[7]:
from examples.SSD300_inference import load_checkpoint, build_predictor
# Now we can load the model. We also need to set it to evaluation mode:
# In[8]:
from apex.fp16_utils import network_to_half
ssd300 = build_predictor('/checkpoints/SSD300v1.1.pt')
ssd300 = ssd300.cuda()
ssd300 = network_to_half(ssd300.cuda())
ssd300 = ssd300.eval()
# The model does not take an `ndarray` as input; it expects a PyTorch tensor. It also expects the input to be a batch of images, laid out in a slightly different (channels-first) shape than usual.
# We can fulfill these requirements with the following code:
# In[9]:
# change the shape
HWC = img
CHW = np.swapaxes(np.swapaxes(HWC, 0, 2), 1, 2)
# make a batch of 1 image
batch = np.expand_dims(CHW, axis=0)
# turn input into tensor
tensor = torch.from_numpy(batch)
tensor = tensor.cuda()
tensor = tensor.half()
tensor.shape
# ## Running prediction
# Finally, we can run the prediction:
# In[10]:
prediction = ssd300(tensor)
# However, the raw output from the model is not easy to read. A few more steps are needed to present it in a human-readable form.
# First, more imports...
# In[11]:
from ssd.utils import dboxes300_coco, Encoder
import matplotlib.patches as patches
import json
# These allow us to decode the result:
# In[12]:
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
ploc, plabel = [val.float() for val in prediction]
encoded = encoder.decode_batch(ploc, plabel, criteria=0.5, max_output=20)
# The `criteria` parameter controls overlap suppression while decoding: detections whose IoU with a higher-scoring box is not lower than `criteria` are filtered out.
#
# Encoder returns a batch of results in a form:
# ```
# [ fst img: prediction,
# snd_img: prediction,
# ...
# ]
# ```
# While the prediction is:
# ```
# ( bounding boxes: [ fst detection: [x1, y1, x2, y2], snd detection ... ],
# classes: [ fst detection: class idx, snd detection ... ],
# confidences: [ fst detection: confidence, snd detection ... ]
# )
# ```
#
# Now we can take the result back to the NumPy world and put the results on an image.
# We have a single input image, so we will also get rid of the batch dimension:
# In[13]:
bboxes, classes, confidences = [x.detach().cpu().numpy() for x in encoded[0]]
# Next, we can filter out results with confidence lower than some threshold:
# In[14]:
best = np.argwhere(confidences > 0.3).squeeze()
# To show labels on the detections we need to decode them. Our model is trained on the COCO 2017 dataset, so we will use the COCO labels:
# In[15]:
json_file = '/datasets/coco2017/annotations/instances_val2017.json'
with open(json_file,'r') as COCO:
js = json.loads(COCO.read())
class_names = [ category['name'] for category in js['categories'] ]
# Now we can build the final picture with the results.
# Bounding boxes returned by the model are in the [0..1] range. We need to scale them to [0..300], since that is the size of the image.
# In[16]:
fig,ax = plt.subplots(1)
ax.imshow(out)
for idx in best:
left, top, right, bottom = bboxes[idx]
x, y, w, h = [val*300 for val in [left, top, right-left, bottom-top]]
rect = patches.Rectangle((x, y),w,h,linewidth=1,edgecolor='r',facecolor='none')
ax.add_patch(rect)
ax.text(x, y, class_names[classes[idx]-1], bbox=dict(facecolor='white', alpha=0.5))
plt.show()
# In[ ]:
|
TensorFlow2/Recommendation/WideAndDeep/triton/scripts/docker | docker | interactive | #!/usr/bin/env bash
# Copyright (c) 2021-2022,NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=0}
docker run -it --rm \
--runtime=nvidia \
-e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
--ipc=host \
-e WORKDIR="$(pwd)" \
-e PYTHONPATH="$(pwd)" \
-v "$(pwd)":"$(pwd)" \
-v /var/run/docker.sock:/var/run/docker.sock \
-w "$(pwd)" \
widendeep:latest bash
|
PyTorch/SpeechSynthesis/FastPitch/triton | triton | convert_model | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The `convert_model.py` script converts between model formats and can apply additional
model optimizations for faster inference.
It converts a model obtained from the `get_model` function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- --large-model flag - helps loading model which exceeds maximum protobuf size of 2GB
  - --tf-allow-growth flag - controls the GPU memory growth limiting feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled.
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
    # if conversion is required, temporarily change the model load precision to that required by the converter
# it is for TensorRT converters which require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
        # dataloader must match the source model precision - so not recovering it yet
if args.dataloader is not None:
if args.p_arpabet > 0.0:
from common.text import cmudict
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
|
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads | heads | head | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base head class.
All the different kinds of prediction heads in different models will inherit
from this class. What is in common between all head classes is that they have a
`predict` function that receives `features` as its first argument.
How to add a new prediction head to an existing meta architecture?
For example, how can we add a `3d shape` prediction head to Mask RCNN?
We have to take the following steps to add a new prediction head to an
existing meta arch:
(a) Add a class for predicting the head. This class should inherit from the
`Head` class below and have a `predict` function that receives the features
and predicts the output. The output is always a tf.float32 tensor.
(b) Add the head to the meta architecture. For example in case of Mask RCNN,
go to box_predictor_builder and put in the logic for adding the new head to the
Mask RCNN box predictor.
(c) Add the logic for computing the loss for the new head.
(d) Add the necessary metrics for the new head.
(e) (optional) Add visualization for the new head.
"""
from abc import abstractmethod
import tensorflow as tf
class Head(object):
"""Mask RCNN head base class."""
def __init__(self):
"""Constructor."""
pass
@abstractmethod
def predict(self, features, num_predictions_per_location):
"""Returns the head's predictions.
Args:
features: A float tensor of features.
num_predictions_per_location: Int containing number of predictions per
location.
Returns:
A tf.float32 tensor.
"""
pass
class KerasHead(tf.keras.Model):
"""Keras head base class."""
def call(self, features):
"""The Keras model call will delegate to the `_predict` method."""
return self._predict(features)
@abstractmethod
def _predict(self, features):
"""Returns the head's predictions.
Args:
features: A float tensor of features.
Returns:
A tf.float32 tensor.
"""
pass
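

# The steps listed in the module docstring can be illustrated with a small,
# hypothetical head. This sketch is not part of the Object Detection API; the
# class name, the shape-code size, and the use of a 1x1 convolution are
# assumptions made purely to illustrate step (a) above.
class Shape3DHead(Head):
  """Hypothetical example head predicting a 3d shape code per anchor."""

  def __init__(self, shape_code_size=64):
    super(Shape3DHead, self).__init__()
    self._shape_code_size = shape_code_size

  def predict(self, features, num_predictions_per_location):
    # Emit one shape code per predicted box at every spatial location.
    return tf.layers.conv2d(
        features,
        num_predictions_per_location * self._shape_code_size,
        kernel_size=1)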
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | post_processing_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for post_processing_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2
class PostProcessingBuilderTest(tf.test.TestCase):
def test_build_non_max_suppressor_with_correct_parameters(self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
def test_build_identity_score_converter(self):
post_processing_text_proto = """
score_converter: IDENTITY
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
with self.test_session() as sess:
converted_scores = sess.run(outputs)
expected_converted_scores = sess.run(inputs)
self.assertAllClose(converted_scores, expected_converted_scores)
def test_build_identity_score_converter_with_logit_scale(self):
post_processing_text_proto = """
score_converter: IDENTITY
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'identity_with_logit_scale')
inputs = tf.constant([1, 1], tf.float32)
outputs = score_converter(inputs)
with self.test_session() as sess:
converted_scores = sess.run(outputs)
expected_converted_scores = sess.run(tf.constant([.5, .5], tf.float32))
self.assertAllClose(converted_scores, expected_converted_scores)
def test_build_sigmoid_score_converter(self):
post_processing_text_proto = """
score_converter: SIGMOID
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale')
def test_build_softmax_score_converter(self):
post_processing_text_proto = """
score_converter: SOFTMAX
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
def test_build_softmax_score_converter_with_temperature(self):
post_processing_text_proto = """
score_converter: SOFTMAX
logit_scale: 2.0
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale')
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops | ops | postprocess_ops | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops used to post-process raw detections."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.utils import box_utils
def generate_detections_per_image_tpu(cls_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)):
"""Generate the final detections per image given the model outputs.
Args:
cls_outputs: a tensor with shape [N, num_classes], which stacks class
logit outputs on all feature levels. The N is the number of total anchors
on all levels. The num_classes is the number of classes predicted by the
model. Note that the cls_outputs should be the output of softmax().
box_outputs: a tensor with shape [N, num_classes*4], which stacks box
regression outputs on all feature levels. The N is the number of total
anchors on all levels.
anchor_boxes: a tensor with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [5] which encodes the input image's [height,
width, scale, original_height, original_width]
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
detections: Tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores
-- respectively.
"""
num_boxes, num_classes = cls_outputs.get_shape().as_list()
# Remove background class scores.
cls_outputs = cls_outputs[:, 1:num_classes]
top_k_scores, top_k_indices_with_classes = tf.nn.top_k(
tf.reshape(cls_outputs, [-1]),
k=pre_nms_num_detections,
sorted=False
)
classes = tf.math.mod(top_k_indices_with_classes, num_classes - 1)
top_k_indices = tf.math.floordiv(top_k_indices_with_classes, num_classes - 1)
anchor_boxes = tf.gather(anchor_boxes, top_k_indices)
box_outputs = tf.reshape(box_outputs, [num_boxes, num_classes, 4])[:, 1:num_classes, :]
class_indices = classes
box_outputs = tf.gather_nd(box_outputs, tf.stack([top_k_indices, class_indices], axis=1))
# apply bounding box regression to anchors
boxes = box_utils.decode_boxes(box_outputs, anchor_boxes, bbox_reg_weights)
boxes = box_utils.clip_boxes(boxes, image_info[0], image_info[1])
list_of_all_boxes = []
list_of_all_scores = []
list_of_all_classes = []
# Skip background class.
for class_i in range(num_classes):
# Compute bitmask for the given classes.
class_i_bitmask = tf.cast(tf.equal(classes, class_i), top_k_scores.dtype)
# This works because score is in [0, 1].
class_i_scores = top_k_scores * class_i_bitmask
# The TPU and CPU have different behaviors for
# tf.image.non_max_suppression_padded (b/116754376).
class_i_post_nms_indices, class_i_nms_num_valid = tf.image.non_max_suppression_padded(
tf.cast(boxes, dtype=tf.float32),
tf.cast(class_i_scores, dtype=tf.float32),
post_nms_num_detections,
iou_threshold=nms_threshold,
score_threshold=0.05,
pad_to_max_output_size=True,
name='nms_detections_' + str(class_i)
)
class_i_post_nms_boxes = tf.gather(boxes, class_i_post_nms_indices)
class_i_post_nms_scores = tf.gather(class_i_scores, class_i_post_nms_indices)
mask = tf.less(tf.range(post_nms_num_detections), [class_i_nms_num_valid])
class_i_post_nms_scores = tf.where(
mask, class_i_post_nms_scores, tf.zeros_like(class_i_post_nms_scores)
)
class_i_classes = tf.fill(tf.shape(input=class_i_post_nms_scores), class_i + 1)
list_of_all_boxes.append(class_i_post_nms_boxes)
list_of_all_scores.append(class_i_post_nms_scores)
list_of_all_classes.append(class_i_classes)
post_nms_boxes = tf.concat(list_of_all_boxes, axis=0)
post_nms_scores = tf.concat(list_of_all_scores, axis=0)
post_nms_classes = tf.concat(list_of_all_classes, axis=0)
# sort all results.
post_nms_scores, sorted_indices = tf.nn.top_k(
tf.cast(post_nms_scores, dtype=tf.float32),
k=post_nms_num_detections,
sorted=True
)
post_nms_boxes = tf.gather(post_nms_boxes, sorted_indices)
post_nms_classes = tf.gather(post_nms_classes, sorted_indices)
valid_mask = tf.where(
tf.greater(post_nms_scores, 0), tf.ones_like(post_nms_scores),
tf.zeros_like(post_nms_scores)
)
num_valid_boxes = tf.reduce_sum(input_tensor=valid_mask, axis=-1)
box_classes = tf.cast(post_nms_classes, dtype=tf.float32)
return num_valid_boxes, post_nms_boxes, box_classes, post_nms_scores
def generate_detections_tpu(class_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)
):
"""Generate the final detections given the model outputs (TPU version).
Args:
class_outputs: a tensor with shape [batch_size, N, num_classes], which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
box_outputs: a tensor with shape [batch_size, N, num_classes*4], which
stacks box regression outputs on all feature levels. The N is the number
of total anchors on all levels.
anchor_boxes: a tensor with shape [batch_size, N, 4], which stacks anchors
on all feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [batch_size, 5] which encodes each image's
[height, width, scale, original_height, original_width].
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
a tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores stacked
in batch_size.
"""
with tf.name_scope('generate_detections'):
batch_size, _, _ = class_outputs.get_shape().as_list()
softmax_class_outputs = tf.nn.softmax(class_outputs)
num_valid_boxes, box_coordinates, box_classes, box_scores = ([], [], [], [])
for i in range(batch_size):
result = generate_detections_per_image_tpu(
softmax_class_outputs[i], box_outputs[i], anchor_boxes[i],
image_info[i], pre_nms_num_detections, post_nms_num_detections,
nms_threshold, bbox_reg_weights)
num_valid_boxes.append(result[0])
box_coordinates.append(result[1])
box_classes.append(result[2])
box_scores.append(result[3])
num_valid_boxes = tf.stack(num_valid_boxes)
box_coordinates = tf.stack(box_coordinates)
box_classes = tf.stack(box_classes)
box_scores = tf.stack(box_scores)
return num_valid_boxes, box_coordinates, box_classes, box_scores
def generate_detections_gpu(class_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)
):
"""Generate the final detections given the model outputs (GPU version).
Args:
class_outputs: a tensor with shape [batch_size, N, num_classes], which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
box_outputs: a tensor with shape [batch_size, N, num_classes*4], which
stacks box regression outputs on all feature levels. The N is the number
of total anchors on all levels.
anchor_boxes: a tensor with shape [batch_size, N, 4], which stacks anchors
on all feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [batch_size, 5] which encodes each image's
[height, width, scale, original_height, original_width].
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
a tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores stacked
in batch_size.
"""
with tf.name_scope('generate_detections'):
batch_size, num_boxes, num_classes = class_outputs.get_shape().as_list()
softmax_class_outputs = tf.nn.softmax(class_outputs)
# Remove background
scores = tf.slice(softmax_class_outputs, [0, 0, 1], [-1, -1, -1])
boxes = tf.slice(
tf.reshape(box_outputs, [batch_size, num_boxes, num_classes, 4]),
[0, 0, 1, 0], [-1, -1, -1, -1]
)
anchor_boxes = tf.expand_dims(anchor_boxes, axis=2) * tf.ones([1, 1, num_classes - 1, 1])
num_detections = num_boxes * (num_classes - 1)
boxes = tf.reshape(boxes, [batch_size, num_detections, 4])
scores = tf.reshape(scores, [batch_size, num_detections, 1])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Decode
boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)
# Clip boxes
height = tf.expand_dims(image_info[:, 0:1], axis=-1)
width = tf.expand_dims(image_info[:, 1:2], axis=-1)
boxes = box_utils.clip_boxes(boxes, height, width)
# NMS
pre_nms_boxes = box_utils.to_normalized_coordinates(boxes, height, width)
pre_nms_boxes = tf.reshape(pre_nms_boxes, [batch_size, num_boxes, num_classes - 1, 4])
pre_nms_scores = tf.reshape(scores, [batch_size, num_boxes, num_classes - 1])
        # Cast to float32 to avoid problems when running with Keras AMP
pre_nms_boxes = tf.cast(pre_nms_boxes, dtype=tf.float32)
pre_nms_scores = tf.cast(pre_nms_scores, dtype=tf.float32)
post_nms_boxes, post_nms_scores, post_nms_classes, \
post_nms_num_valid_boxes = tf.image.combined_non_max_suppression(
pre_nms_boxes,
pre_nms_scores,
max_output_size_per_class=pre_nms_num_detections,
max_total_size=post_nms_num_detections,
iou_threshold=nms_threshold,
score_threshold=0.0,
pad_per_class=False
)
post_nms_classes = post_nms_classes + 1
post_nms_boxes = box_utils.to_absolute_coordinates(post_nms_boxes, height, width)
return post_nms_num_valid_boxes, post_nms_boxes, tf.cast(post_nms_classes, dtype=tf.float32), post_nms_scores
|
PaddlePaddle/LanguageModeling/BERT | BERT | run_squad | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import collections
import sys
import subprocess
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program
from paddle.fluid.contrib.mixed_precision.fp16_lists import AutoMixedPrecisionLists
from modeling import BertForQuestionAnswering, BertConfig
from tokenizer import BertTokenizer
from squad_utils import get_answers
from loss import CrossEntropyLossForSQuAD
from squad_dataset import SQuAD, create_squad_data_holder
from utils.collate import Pad, Stack, Tuple
from utils.utility import get_num_trainers, get_trainer_id, set_seed
from utils.logger import setup_loggers
from utils.affinity import set_cpu_affinity
from utils.save_load import mkdir_if_not_exist, init_program, save_model
from utils.config import print_args, parse_args
from utils.task import Task
from optimizer import AdamW
from lr_scheduler import Poly
from program import dist_optimizer
import dllogger
def evaluate(args, exe, logits, dev_program, data_loader):
RawResult = collections.namedtuple(
"RawResult", ["unique_id", "start_logits", "end_logits"])
all_results = []
infer_start = time.time()
tic_eval = time.time()
tic_benchmark_begin = 0
tic_benchmark_end = 0
dllogger.log(step="PARAMETER", data={"eval_start": True})
for step, batch in enumerate(data_loader):
start_logits_tensor, end_logits_tensor = exe.run(dev_program,
feed=batch,
fetch_list=[*logits])
if args.benchmark and step == args.benchmark_warmup_steps:
tic_benchmark_begin = time.time()
if args.benchmark and step == args.benchmark_warmup_steps + args.benchmark_steps:
tic_benchmark_end = time.time()
unique_ids = np.array(batch[0]['unique_id'])
for idx in range(unique_ids.shape[0]):
if len(all_results) % 1000 == 0 and len(all_results):
dllogger.log(step="PARAMETER",
data={
"sample_number": len(all_results),
"time_per_1000": time.time() - tic_eval
})
tic_eval = time.time()
unique_id = int(unique_ids[idx])
start_logits = [float(x) for x in start_logits_tensor[idx]]
end_logits = [float(x) for x in end_logits_tensor[idx]]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
if args.benchmark:
time_to_benchmark = tic_benchmark_end - tic_benchmark_begin
dllogger.log(step=tuple(),
data={
"inference_sequences_per_second":
args.predict_batch_size * args.benchmark_steps /
time_to_benchmark
})
return
else:
time_to_infer = time.time() - infer_start
dllogger.log(step=tuple(),
data={
"e2e_inference_time": time_to_infer,
"inference_sequences_per_second":
len(data_loader.dataset.features) / time_to_infer
})
output_dir = os.path.join(args.output_dir, args.bert_model, "squad")
mkdir_if_not_exist(output_dir)
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
answers, nbest_answers = get_answers(args, data_loader.dataset.examples,
data_loader.dataset.features,
all_results)
with open(output_prediction_file, "w") as f:
f.write(json.dumps(answers, indent=4) + "\n")
with open(output_nbest_file, "w") as f:
f.write(json.dumps(nbest_answers, indent=4) + "\n")
if args.do_eval:
eval_out = subprocess.check_output([
sys.executable, args.eval_script, args.predict_file,
output_prediction_file
])
scores = str(eval_out).strip()
exact_match = float(scores.split(":")[1].split(",")[0])
f1 = float(scores.split(":")[2].split("}")[0])
dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1})
def main(args):
setup_loggers(args.report_file)
if args.show_config:
print_args(args)
trainer_id = get_trainer_id()
num_trainers = get_num_trainers()
    # Set the Paddle execution environment
fleet.init(is_collective=True)
if args.enable_cpu_affinity:
set_cpu_affinity()
place = paddle.set_device('gpu')
set_seed(args.seed)
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
# Create the main_program for the training and dev_program for the validation
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
tokenizer = BertTokenizer(
vocab_file=args.vocab_file,
do_lower_case=args.do_lower_case,
max_len=512)
with paddle.static.program_guard(main_program, startup_program):
input_ids, segment_ids, start_positions, end_positions, unique_id = create_squad_data_holder(
)
if args.do_train:
train_dataset = SQuAD(
tokenizer=tokenizer,
doc_stride=args.doc_stride,
path=args.train_file,
version_2_with_negative=args.version_2_with_negative,
max_query_length=args.max_query_length,
max_seq_length=args.max_seq_length,
mode="train")
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # input
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # segment
Stack(), # unique_id
Stack(dtype="int64"), # start_pos
Stack(dtype="int64") # end_pos
): [data for i, data in enumerate(fn(samples)) if i != 2]
train_data_loader = paddle.io.DataLoader(
dataset=train_dataset,
feed_list=[
input_ids, segment_ids, start_positions, end_positions
],
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
num_workers=0,
return_list=False)
with paddle.static.program_guard(main_program, startup_program):
bert_config = BertConfig.from_json_file(args.config_file)
bert_config.fuse_mha = args.fuse_mha
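        # Round the vocabulary size up to a multiple of 8; keeping GEMM dimensions
        # divisible by 8 generally allows Tensor Cores to be used efficiently.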
if bert_config.vocab_size % 8 != 0:
bert_config.vocab_size += 8 - (bert_config.vocab_size % 8)
model = BertForQuestionAnswering(bert_config)
criterion = CrossEntropyLossForSQuAD()
logits = model(input_ids=input_ids, token_type_ids=segment_ids)
if args.do_predict:
dev_program = main_program.clone(for_test=True)
if args.do_train:
loss = criterion(logits, (start_positions, end_positions))
num_train_steps = len(train_data_loader) * args.epochs
if args.max_steps is not None and args.max_steps > 0:
num_train_steps = min(num_train_steps, args.max_steps)
lr_scheduler = Poly(
learning_rate=args.learning_rate, num_steps=num_train_steps)()
optimizer = AdamW(args, learning_rate=lr_scheduler)()
optimizer = dist_optimizer(args, optimizer)
optimizer.minimize(loss)
exe = paddle.static.Executor(place)
exe.run(startup_program)
init_program(
args, program=main_program, exe=exe, model=model, task=Task.squad)
if args.do_train:
dllogger.log(step="PARAMETER", data={"train_start": True})
dllogger.log(step="PARAMETER",
data={
"training_samples":
len(train_data_loader.dataset.examples)
})
dllogger.log(step="PARAMETER",
data={
"training_features":
len(train_data_loader.dataset.features)
})
dllogger.log(step="PARAMETER",
data={"train_batch_size": args.train_batch_size})
dllogger.log(step="PARAMETER", data={"steps": num_train_steps})
global_step = 0
tic_benchmark_begin = 0
tic_benchmark_end = 0
tic_train_begin = time.time()
for epoch in range(args.epochs):
for batch in train_data_loader:
if global_step >= num_train_steps:
break
if args.benchmark and global_step >= args.benchmark_warmup_steps + args.benchmark_steps:
break
loss_return = exe.run(main_program,
feed=batch,
fetch_list=[loss])
lr = lr_scheduler.get_lr()
lr_scheduler.step()
global_step += 1
if args.benchmark and global_step == args.benchmark_warmup_steps:
tic_benchmark_begin = time.time()
if args.benchmark and global_step == args.benchmark_warmup_steps + args.benchmark_steps:
tic_benchmark_end = time.time()
if global_step % args.log_freq == 0:
dllogger_it_data = {
'loss': loss_return[0].item(),
'learning_rate': lr
}
dllogger.log((epoch, global_step), data=dllogger_it_data)
if not args.benchmark:
time_to_train = time.time() - tic_train_begin
dllogger.log(step=tuple(),
data={
"e2e_train_time": time_to_train,
"training_sequences_per_second":
args.train_batch_size * num_train_steps *
num_trainers / time_to_train
})
else:
time_to_benchmark = tic_benchmark_end - tic_benchmark_begin
dllogger.log(step=tuple(),
data={
"training_sequences_per_second":
args.train_batch_size * args.benchmark_steps *
num_trainers / time_to_benchmark
})
if trainer_id == 0:
model_path = os.path.join(args.output_dir, args.bert_model,
"squad")
save_model(main_program, model_path, args.model_prefix)
if args.do_predict and trainer_id == 0:
dev_dataset = SQuAD(
tokenizer=tokenizer,
doc_stride=args.doc_stride,
path=args.predict_file,
version_2_with_negative=args.version_2_with_negative,
max_query_length=args.max_query_length,
max_seq_length=args.max_seq_length,
mode="dev")
dev_batch_sampler = paddle.io.BatchSampler(
dev_dataset, batch_size=args.predict_batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # input
Pad(axis=0, pad_val=tokenizer.vocab[tokenizer.pad_token]), # segment
Stack() # unique_id
): fn(samples)
dev_data_loader = paddle.io.DataLoader(
dataset=dev_dataset,
feed_list=[input_ids, segment_ids, unique_id],
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
return_list=False)
dllogger.log(step="PARAMETER", data={"predict_start": True})
dllogger.log(
step="PARAMETER",
data={"eval_samples": len(dev_data_loader.dataset.examples)})
dllogger.log(
step="PARAMETER",
data={"eval_features": len(dev_data_loader.dataset.features)})
dllogger.log(step="PARAMETER",
data={"predict_batch_size": args.predict_batch_size})
if args.amp:
amp_lists = AutoMixedPrecisionLists(
custom_white_list=['softmax', 'layer_norm', 'gelu'])
rewrite_program(dev_program, amp_lists=amp_lists)
evaluate(args, exe, logits, dev_program, dev_data_loader)
if __name__ == "__main__":
paddle.enable_static()
main(parse_args(Task.squad))
|
PyTorch/Detection/SSD/examples | examples | SSD300_FP32_4GPU | # This script launches SSD300 training in FP32 on 4 GPUs using 128 batch size (32 per GPU)
# Usage ./SSD300_FP32_4GPU.sh <path to this repository> <path to dataset> <additional flags>
torchrun --nproc_per_node=4 $1/main.py --backbone resnet50 --warmup 300 --bs 32 --no-amp --data-layout channels_first --data $2 ${@:3}
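# Example invocation (paths are illustrative):
#   ./SSD300_FP32_4GPU.sh /workspace/ssd /coco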
|
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library | library | utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim=0):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)
for k, v in all_shapes.items():
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)])
max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)])
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
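# Illustrative sketch (not part of the original module): exercising the helpers above
# with a minimal in-memory "dataloader". The assumed contract is an iterable of
# (ids, inputs_dict, outputs_dict) batches of numpy arrays; the tensor names and
# shapes below are made up for the example. Run via `python -m ...` so the relative
# import of ShapeSpec above resolves.
if __name__ == "__main__":
    import numpy as np

    def _dummy_dataloader(num_batches=4, batch_size=2):
        for i in range(num_batches):
            ids = list(range(i * batch_size, (i + 1) * batch_size))
            x = {"input__0": np.zeros((batch_size, 3, 224, 224), dtype=np.float32)}
            y = {"output__0": np.zeros((batch_size, 1000), dtype=np.float32)}
            yield ids, x, y

    print(get_dynamic_axes(_dummy_dataloader()))  # only the batch dimension is dynamic here
    print(get_input_shapes(_dummy_dataloader(), max_batch_size=8))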
|
PyTorch/Classification/ConvNets | ConvNets | launch | import os
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, Any
import yaml
from main import main, add_parser_arguments, available_models
import torch.backends.cudnn as cudnn
import argparse
def get_config_path():
return Path(os.path.dirname(os.path.abspath(__file__))) / "configs.yml"
if __name__ == "__main__":
yaml_cfg_parser = argparse.ArgumentParser(add_help=False)
yaml_cfg_parser.add_argument(
"--cfg_file",
default=get_config_path(),
type=str,
help="path to yaml config file",
)
yaml_cfg_parser.add_argument("--model", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--mode", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--precision", default=None, type=str, required=True)
yaml_cfg_parser.add_argument("--platform", default=None, type=str, required=True)
yaml_args, rest = yaml_cfg_parser.parse_known_args()
with open(yaml_args.cfg_file, "r") as cfg_file:
config = yaml.load(cfg_file, Loader=yaml.FullLoader)
cfg = {
**config["precision"][yaml_args.precision],
**config["platform"][yaml_args.platform],
**config["models"][yaml_args.model][yaml_args.platform][yaml_args.precision],
**config["mode"][yaml_args.mode],
}
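    # Later entries override earlier ones on key collisions: mode settings take
    # precedence over model-specific ones, which override platform, which override
    # the precision defaults (standard dict-merge semantics of {**a, **b}).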
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
add_parser_arguments(parser)
parser.set_defaults(**cfg)
args, rest = parser.parse_known_args(rest)
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
assert len(rest) == 0, f"Unknown args passed: {rest}"
cudnn.benchmark = True
main(args, model_args, model_arch)
|
PyTorch/Classification/GPUNet/triton/065ms/runner | runner | start_NVIDIA-DGX-A100-(1x-A100-80GB) | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# Evaluate Runner
python3 -m "triton.065ms.runner.__main__" \
--config-path "triton/065ms/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
--device 0 |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | speechDataBuffer | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "speechDataBuffer.h"
#include "checkedCopy.h"
#include "cudaUtils.h"
#include "cuda_runtime.h"
#include <cassert>
namespace tts
{
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
SpeechDataBuffer::SpeechDataBuffer(
const int inputSpacing,
const int melSpacing,
const int samplesSpacing,
const int maxBatchSize) :
TimedObject("SpeechDataBuffer::copyToDevice()/copyFromDevice()"),
mInputDevice(inputSpacing * maxBatchSize),
mMelsDevice(melSpacing * maxBatchSize),
mSamplesDevice(samplesSpacing * maxBatchSize)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void SpeechDataBuffer::copyToDevice(const int32_t* const inputHost, const size_t size)
{
if (size > mInputDevice.size())
{
throw std::runtime_error("Cannot copy input larger than device input: " + std::to_string(size) + "/"
+ std::to_string(mInputDevice.size()));
}
startTiming();
CheckedCopy::hostToDevice(mInputDevice.data(), inputHost, size);
stopTiming();
}
void SpeechDataBuffer::copyToDevice(const int batchSize, const std::vector<int32_t>* const inputHost, int& spacing)
{
startTiming();
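    // Use the longest input sequence in the batch as the per-item spacing, so each
    // item can be copied into a fixed-stride slot of the device input buffer.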
spacing = 0;
for (int i = 0; i < batchSize; ++i)
{
const int inputSize = static_cast<int>(inputHost[i].size());
if (inputSize > spacing)
{
spacing = inputSize;
}
}
const size_t size = spacing * static_cast<size_t>(batchSize);
if (size > mInputDevice.size())
{
throw std::runtime_error("Cannot copy input larger than device input: " + std::to_string(size) + "/"
+ std::to_string(mInputDevice.size()));
}
cudaStream_t stream;
cudaStreamCreate(&stream);
for (int i = 0; i < batchSize; ++i)
{
CheckedCopy::hostToDeviceAsync(
mInputDevice.data() + (spacing * i),
inputHost[i].data(),
inputHost[i].size(),
stream);
}
CudaUtils::sync(stream);
cudaStreamDestroy(stream);
stopTiming();
}
void SpeechDataBuffer::copyFromDevice(
float* const melsHost, const size_t melsSize, float* const samplesHost, const size_t samplesSize)
{
if (melsHost && melsSize > mMelsDevice.size())
{
throw std::runtime_error("Cannot copy mels larger than device mels: " + std::to_string(melsSize) + "/"
+ std::to_string(mMelsDevice.size()));
}
if (samplesSize > mSamplesDevice.size())
{
throw std::runtime_error("Cannot copy samples larger than device samples: " + std::to_string(samplesSize) + "/"
+ std::to_string(mSamplesDevice.size()));
}
startTiming();
CheckedCopy::deviceToHost(samplesHost, mSamplesDevice.data(), samplesSize);
if (melsHost)
{
CheckedCopy::deviceToHost(melsHost, mMelsDevice.data(), melsSize);
}
stopTiming();
}
void SpeechDataBuffer::copyFromDevice(const int batchSize, std::vector<float>* const samplesHost,
const int sampleSpacing, const int* const samplesLengths)
{
startTiming();
cudaStream_t stream;
cudaStreamCreate(&stream);
for (int i = 0; i < batchSize; ++i)
{
assert(samplesLengths[i] <= sampleSpacing);
samplesHost[i].resize(samplesLengths[i]);
CheckedCopy::deviceToHostAsync(
samplesHost[i].data(),
mSamplesDevice.data() + (sampleSpacing * i),
samplesLengths[i],
stream);
}
CudaUtils::sync(stream);
cudaStreamDestroy(stream);
stopTiming();
}
const int32_t* SpeechDataBuffer::getInputOnDevice() const
{
return mInputDevice.data();
}
float* SpeechDataBuffer::getMelsOnDevice()
{
return mMelsDevice.data();
}
float* SpeechDataBuffer::getSamplesOnDevice()
{
return mSamplesDevice.data();
}
} // namespace tts
|
TensorFlow/Classification/ConvNets/resnet50v1.5/training | training | DGX1_RN50_AMP_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnet50 \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=256 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \
--amp --static_loss_scale 128 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
PyTorch/DrugDiscovery/MoFlow/scripts | scripts | prepare_datasets | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
REPO_URL='https://raw.githubusercontent.com/calvin-zcx/moflow'
GIT_HASH='3026b2e9bb8de027f3887deb96ccdd876ba51664'
DATA_DIR="/data"
wget -O "${DATA_DIR}/zinc250k.csv" "${REPO_URL}/${GIT_HASH}/data/zinc250k.csv"
wget -O "${DATA_DIR}/valid_idx_zinc250k.json" "${REPO_URL}/${GIT_HASH}/data/valid_idx_zinc.json"
python ${PWD}/scripts/data_preprocess.py --data_name "zinc250k" --data_dir ${DATA_DIR}
|
PyTorch/Detection/Efficientdet/effdet/config | config | model_config | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
def default_detection_model_configs():
"""Returns a default detection configs."""
h = OmegaConf.create()
# model name.
h.name = 'tf_efficientdet_d1'
h.backbone_name = 'tf_efficientnet_b1'
h.backbone_args = None # FIXME sort out kwargs vs config for backbone creation
# model specific, input preprocessing parameters
h.image_size = 640
# dataset specific head parameters
h.num_classes = 90
# feature + anchor config
h.min_level = 3
h.max_level = 7
h.num_levels = h.max_level - h.min_level + 1
h.num_scales = 3
h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
h.anchor_scale = 4.0
# FPN and head config
h.pad_type = 'same' # original TF models require an equivalent of Tensorflow 'SAME' padding
h.act_type = 'swish'
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_channels = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_relu_pattern = False
h.use_native_resize_op = False
h.pooling_type = None
h.redundant_bias = True # original TF models have back to back bias + BN layers, not necessary!
h.fpn_name = None
h.fpn_config = None
    h.fpn_drop_path_rate = 0.  # No stochastic depth by default.
# classification loss (used by train bench)
h.alpha = 0.25
h.gamma = 1.5
# localization loss (used by train bench)
h.delta = 0.1
h.box_loss_weight = 50.0
return h
backbone_config = {
"efficientnet_b0": {
"width_coeff": 1,
"depth_coeff": 1,
"resolution": 224,
"dropout": 0.2,
"checkpoint_path": "./jocbackbone_statedict.pth"
},
"efficientnet_b1": {
"width_coeff": 1,
"depth_coeff": 1.1,
"resolution": 240,
"dropout": 0.2,
"checkpoint_path": ""
},
"efficientnet_b2": {
"width_coeff": 1.1,
"depth_coeff": 1.2,
"resolution": 260,
"dropout": 0.3,
"checkpoint_path": ""
},
"efficientnet_b3": {
"width_coeff": 1.2,
"depth_coeff": 1.4,
"resolution": 300,
"dropout": 0.3,
"checkpoint_path": ""
},
"efficientnet_b4": {
"width_coeff": 1.4,
"depth_coeff": 1.8,
"resolution": 380,
"dropout": 0.4,
"checkpoint_path": "./jocbackbone_statedict_B4.pth"
},
"efficientnet_b5": {
"width_coeff": 1.6,
"depth_coeff": 2.2,
"resolution": 456,
"dropout": 0.4,
"checkpoint_path": ""
},
"efficientnet_b6": {
"width_coeff": 1.8,
"depth_coeff": 2.6,
"resolution": 528,
"dropout": 0.5,
"checkpoint_path": ""
},
"efficientnet_b7": {
"width_coeff": 2.0,
"depth_coeff": 3.1,
"resolution": 600,
"dropout": 0.5,
"checkpoint_path": ""
},
}
efficientdet_model_param_dict = dict(
# Models with PyTorch friendly padding and my PyTorch pretrained backbones, training TBD
efficientdet_d0=dict(
name='efficientdet_d0',
backbone_name='efficientnet_b0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/efficientdet_d0-f3276ba8.pth',
),
efficientdet_d1=dict(
name='efficientdet_d1',
backbone_name='efficientnet_b1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/efficientdet_d1-bb7e98fe.pth',
),
efficientdet_d2=dict(
name='efficientdet_d2',
backbone_name='efficientnet_b2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
efficientdet_d3=dict(
name='efficientdet_d3',
backbone_name='efficientnet_b3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
efficientdet_d4=dict(
name='efficientdet_d4',
backbone_name='efficientnet_b4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='',
),
# My own experimental configs with alternate models, training TBD
# Note: any 'timm' model in the EfficientDet family can be used as a backbone here.
efficientdet_w0=dict(
name='efficientdet_w0', # 'wide'
backbone_name='efficientnet_b0',
image_size=512,
fpn_channels=80,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(
drop_path_rate=0.1,
feature_location='depthwise'), # features from after DW/SE in IR block
url='', # no pretrained weights yet
),
mixdet_m=dict(
name='mixdet_m',
backbone_name='mixnet_m',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
mixdet_l=dict(
name='mixdet_l',
backbone_name='mixnet_l',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
mobiledetv2_110d=dict(
name='mobiledetv2_110d',
backbone_name='mobilenetv2_110d',
image_size=384,
fpn_channels=48,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='relu6',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.05),
url='', # no pretrained weights yet
),
mobiledetv2_120d=dict(
name='mobiledetv2_120d',
backbone_name='mobilenetv2_120d',
image_size=512,
fpn_channels=56,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='relu6',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
mobiledetv3_large=dict(
name='mobiledetv3_large',
backbone_name='mobilenetv3_large_100',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='hard_swish',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
# Models ported from Tensorflow with pretrained backbones ported from Tensorflow
tf_efficientdet_d0=dict(
name='tf_efficientdet_d0',
backbone_name='tf_efficientnet_b0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d0-d92fd44f.pth',
),
tf_efficientdet_d1=dict(
name='tf_efficientdet_d1',
backbone_name='tf_efficientnet_b1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d1-4c7ebaf2.pth'
),
tf_efficientdet_d2=dict(
name='tf_efficientdet_d2',
backbone_name='tf_efficientnet_b2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d2-cb4ce77d.pth',
),
tf_efficientdet_d3=dict(
name='tf_efficientdet_d3',
backbone_name='tf_efficientnet_b3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d3-b0ea2cbc.pth',
),
tf_efficientdet_d4=dict(
name='tf_efficientdet_d4',
backbone_name='tf_efficientnet_b4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d4-5b370b7a.pth',
),
tf_efficientdet_d5=dict(
name='tf_efficientdet_d5',
backbone_name='tf_efficientnet_b5',
image_size=1280,
fpn_channels=288,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d5-ef44aea8.pth',
),
tf_efficientdet_d6=dict(
name='tf_efficientdet_d6',
backbone_name='tf_efficientnet_b6',
image_size=1280,
fpn_channels=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d6-51cb0132.pth'
),
tf_efficientdet_d7=dict(
name='tf_efficientdet_d7',
backbone_name='tf_efficientnet_b6',
image_size=1536,
fpn_channels=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d7_53-6d1d7a95.pth'
),
# The lite configs are in TF automl repository but no weights yet and listed as 'not final'
tf_efficientdet_lite0=dict(
name='tf_efficientdet_lite0',
backbone_name='tf_efficientnet_lite0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
act_type='relu',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
        # Unlike other tf_ models, this was not ported from the TF AutoML impl, but trained from TF pretrained
        # EfficientNet-Lite weights using this code; it will likely be replaced if/when official det-lite weights are released.
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_lite0-f5f303a9.pth',
),
tf_efficientdet_lite1=dict(
name='tf_efficientdet_lite1',
backbone_name='tf_efficientnet_lite1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
tf_efficientdet_lite2=dict(
name='tf_efficientdet_lite2',
backbone_name='tf_efficientnet_lite2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
tf_efficientdet_lite3=dict(
name='tf_efficientdet_lite3',
backbone_name='tf_efficientnet_lite3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
tf_efficientdet_lite4=dict(
name='tf_efficientdet_lite4',
backbone_name='tf_efficientnet_lite4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
)
def get_backbone_config(backbone_name='efficientnet_b0'):
if backbone_name not in backbone_config:
raise Exception("Backbone name {} not supported".format(backbone_name))
return backbone_config[backbone_name]
def get_efficientdet_config(model_name='tf_efficientdet_d1'):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_model_configs()
h.update(efficientdet_model_param_dict[model_name])
return h
def bifpn_sum_config(base_reduction=8):
"""BiFPN config with sum."""
p = OmegaConf.create()
p.nodes = [
{'reduction': base_reduction << 3, 'inputs_offsets': [3, 4]},
{'reduction': base_reduction << 2, 'inputs_offsets': [2, 5]},
{'reduction': base_reduction << 1, 'inputs_offsets': [1, 6]},
{'reduction': base_reduction, 'inputs_offsets': [0, 7]},
{'reduction': base_reduction << 1, 'inputs_offsets': [1, 7, 8]},
{'reduction': base_reduction << 2, 'inputs_offsets': [2, 6, 9]},
{'reduction': base_reduction << 3, 'inputs_offsets': [3, 5, 10]},
{'reduction': base_reduction << 4, 'inputs_offsets': [4, 11]},
]
p.weight_method = 'sum'
return p
def bifpn_attn_config():
"""BiFPN config with fast weighted sum."""
p = bifpn_sum_config()
p.weight_method = 'attn'
return p
def bifpn_fa_config():
"""BiFPN config with fast weighted sum."""
p = bifpn_sum_config()
p.weight_method = 'fastattn'
return p
def get_fpn_config(fpn_name):
if not fpn_name:
fpn_name = 'bifpn_fa'
name_to_config = {
'bifpn_sum': bifpn_sum_config(),
'bifpn_attn': bifpn_attn_config(),
'bifpn_fa': bifpn_fa_config(),
}
return name_to_config[fpn_name]
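# Illustrative sketch (not part of the original module): fetching a model config and
# overriding a few fields; the override values below are examples only.
if __name__ == "__main__":
    config = get_efficientdet_config('tf_efficientdet_d0')
    config.num_classes = 20  # e.g. retarget the detection head to a 20-class dataset
    config.image_size = 512
    fpn_config = config.fpn_config or get_fpn_config(config.fpn_name)
    print(config.name, config.backbone_name, fpn_config.weight_method)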
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer | maintainer | maintainer_factory | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .docker.maintainer import DockerMaintainer
class MaintainerFactory:
@staticmethod
def create_docker_maintainer():
return DockerMaintainer()
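# Illustrative usage sketch (not part of the original module); currently the factory
# always returns the Docker-based maintainer:
#     maintainer = MaintainerFactory.create_docker_maintainer()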
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | encoderBuilder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_ENCODERBUILDER_H
#define TT2I_ENCODERBUILDER_H
#include "IModelImporter.h"
#include "trtPtr.h"
#include <string>
namespace nvinfer1
{
class ICudaEngine;
class IBuilder;
} // namespace nvinfer1
namespace tts
{
class EncoderBuilder
{
public:
/**
* @brief Create a new EncoderBuilder.
*
* @param numEmbeddingDimensions The number of dimensions in the embedding.
* @param numEncodingDimensions The number of dimensions in 'memory' output.
* @param numAttentionDimensions The number of dimensions of the 'processed
* memory' output.
* @param inputLength The maximum length of input to support.
*/
EncoderBuilder(const int numEmbeddingDimensions, const int numEncodingDimensions, const int numAttentionDimensions,
const int inputLength);
/**
* @brief Build a Tacotron2 Encoder engine.
*
* @param builder The TRT builder.
* @param importer The weight importer.
* @param maxBatchSize The maximum batch size to support.
* @param useFP16 Whether or not to allow FP16 usage in the build.
*
* @return The built engine.
*/
TRTPtr<nvinfer1::ICudaEngine> build(
nvinfer1::IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const bool useFP16);
private:
int mNumEmbeddingDimensions;
int mNumEncodingDimensions;
int mNumAttentionDimensions;
int mInputLength;
};
} // namespace tts
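/*
 * Illustrative usage sketch (not part of the original header); the dimension values
 * and the `builder`/`importer`/`maxBatchSize`/`useFP16` variables are assumptions:
 *
 *   tts::EncoderBuilder encoderBuilder(512, 512, 128, 400);
 *   TRTPtr<nvinfer1::ICudaEngine> engine
 *       = encoderBuilder.build(builder, importer, maxBatchSize, useFP16);
 */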
#endif
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | resnet_v2 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
normalizer_fn=None, activation_fn=None,
scope='shortcut')
residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None,
scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with slim.arg_scope([slim.conv2d],
activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
resnet_v2.default_image_size = 224
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
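# Illustrative sketch (not part of the original file): ResNet-50 v2 in
# fully-convolutional mode for dense prediction, following the resnet_v2() docstring.
# The input size and class count are assumptions; spatial_squeeze is disabled because
# the logits keep a 21x21 spatial extent here.
if __name__ == '__main__':
  inputs = tf.placeholder(tf.float32, [1, 321, 321, 3])
  with slim.arg_scope(resnet_arg_scope()):
    net, end_points = resnet_v2_50(inputs, 21, is_training=False,
                                   global_pool=False, output_stride=16,
                                   spatial_squeeze=False)
  # With output_stride=16 and 321x321 inputs, the output spatial shape is
  # [(321 - 1) / 16 + 1, (321 - 1) / 16 + 1] = [21, 21].
  print(net.get_shape())  # (1, 21, 21, 21)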
|