python_code | repo_name | file_path
---|---|---
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the pruner for classification."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/pruner/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO classification model pruner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from nvidia_tao_tf2.blocks.pruner import Pruner
from nvidia_tao_tf2.common.utils import CUSTOM_OBJS
from nvidia_tao_tf2.cv.classification.utils.helper import decode_tltb, decode_eff
from nvidia_tao_tf2.model_optimization.pruning.pruning import prune
from tensorflow import keras
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
logger = logging.getLogger(__name__)
class ClassificationPruner(Pruner):
"""Classification pruner class."""
def _load_model(self):
"""Load classification model."""
self.model_path = decode_eff(self.model_path, self.key)
if self.cfg.prune.byom_model_path:
custom_objs = decode_tltb(self.cfg.prune.byom_model_path, self.key)['custom_objs']
CUSTOM_OBJS.update(custom_objs)
# @scha: Although a TF SavedModel can mostly train/eval
# models containing custom layers without their actual implementation,
# pruning requires the layer configuration. Hence, it is better to
# specify custom objects while loading.
self.model = keras.models.load_model(
self.model_path, custom_objects=CUSTOM_OBJS
)
self.excluded_layers = ['predictions', 'predictions_dense']
self.model.summary()
def _handle_byom_layers(self, excluded_layers):
"""Handle BYOM custom layers."""
byom_custom_layer = set()
# For BYOM Models with custom layer
for layer in self.model.layers:
# Custom layers are automatically added to excluded_layers
# and byom_custom_layer which will automatically update
# TRAVERSABLE_LAYERS in model_optimization/pruning/pruning.py
if 'helper' in str(type(layer)):
excluded_layers.append(layer.name)
byom_custom_layer.add(type(layer))
# Lambda layers in BYOM models are automatically excluded.
if type(layer) == keras.layers.Lambda:
excluded_layers.append(layer.name)
byom_custom_layer = list(byom_custom_layer)
return byom_custom_layer, excluded_layers
def prune(self, threshold, excluded_layers):
"""Run pruning."""
self._load_model()
byom_custom_layer = None
if self.cfg.prune.byom_model_path:
logger.info("Loading BYOM information")
byom_custom_layer, excluded_layers = self._handle_byom_layers(excluded_layers)
# Pruning trained model
pruned_model = prune(
model=self.model,
method='min_weight',
normalizer=self.normalizer,
criterion=self.criterion,
granularity=self.granularity,
min_num_filters=self.min_num_filters,
threshold=threshold,
equalization_criterion=self.equalization_criterion,
excluded_layers=self.excluded_layers + excluded_layers,
byom_custom_layer=byom_custom_layer)
return pruned_model
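# Minimal usage sketch (illustrative; the Hydra `cfg` fields mirror how this class
# is driven from scripts/prune.py and are assumptions outside that context):
#
#   pruner = ClassificationPruner(cfg)
#   pruned_model = pruner.prune(
#       threshold=cfg.prune.threshold,
#       excluded_layers=list(cfg.prune.excluded_layers))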
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/pruner/pruner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization
import numpy as np
import pytest
from nvidia_tao_tf2.cv.classification.model.model_builder import get_model
from nvidia_tao_tf2.cv.classification.utils.helper import setup_config
class RegConfig():
"""Class for reg config."""
def __init__(self, reg_type, scope, weight_decay):
self.type = reg_type
self.scope = scope
self.weight_decay = weight_decay
bn_config = (False, True)
@pytest.mark.parametrize("freeze_bn",
bn_config)
def test_freeze_bn(freeze_bn):
keras.backend.clear_session()
model = get_model(
"resnet_10",
input_shape=(3, 224, 224),
data_format="channels_first",
freeze_bn=freeze_bn,
nlayers=18,
use_batch_norm=True,
use_pooling=False,
dropout=0.0,
use_bias=False,
all_projections=False,
)
reg_config = RegConfig("L2", "Conv2D,Dense", 1e-5)
model = setup_config(
model,
reg_config.__dict__,
)
model.compile(
loss="mse",
metrics=['accuracy'],
optimizer="sgd"
)
if freeze_bn:
assert check_freeze_bn(model), (
"BN layers not frozen, expected frozen."
)
else:
assert not check_freeze_bn(model), (
"BN layers frozen, expected not frozen."
)
def check_freeze_bn(model):
"""Check if the BN layers in a model is frozen or not."""
bn_weights = []
for layer in model.layers:
if type(layer) == BatchNormalization:
# only check for moving mean and moving variance
bn_weights.append(layer.get_weights()[2:])
rand_input = np.random.random((1, 3, 224, 224))
# do training several times
out_shape = model.outputs[0].get_shape()[1:]
out_label = np.random.random((1,) + out_shape)
model.train_on_batch(rand_input, out_label)
model.train_on_batch(rand_input, out_label)
model.train_on_batch(rand_input, out_label)
# do prediction several times
model.predict(rand_input)
model.predict(rand_input)
model.predict(rand_input)
# finally, check BN weights
new_bn_weights = []
for layer in model.layers:
if type(layer) == BatchNormalization:
# only check for moving mean and moving variance
new_bn_weights.append(layer.get_weights()[2:])
# check the bn weights
for old_w, new_w in zip(bn_weights, new_bn_weights):
if not np.array_equal(old_w, new_w):
return False
return True
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/tests/test_freeze_bn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the unit tests for classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/tests/__init__.py |
"""Utilities for ImageNet data preprocessing & prediction decoding."""
import logging
from tensorflow.keras import backend as K
import numpy as np
logger = logging.getLogger(__name__)
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
# Global tensor of imagenet mean for preprocessing symbolic inputs
_IMAGENET_MEAN = None
# Keras constants.
_KERAS_BACKEND = K
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
"""Simple function to extract submodules from kwargs."""
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in list(kwargs.keys()):
if key not in ['backend', 'layers', 'models', 'utils']:
raise TypeError(f'Invalid keyword argument: {key}')
return backend, layers, models, utils
def _preprocess_numpy_input(x, data_format, mode, color_mode, img_mean, img_depth=8, **kwargs):
"""Preprocesses a Numpy array encoding a batch of images.
# Arguments
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed Numpy array.
"""
assert img_depth in [8, 16], (
f"Unsupported image depth: {img_depth}, should be 8 or 16, "
"please check `model.input_image_depth` in spec file"
)
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == 'tf':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in tf mode.")
if img_depth == 8:
x /= 127.5
else:
x /= 32767.5
x -= 1.
return x
if mode == 'torch':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in torch mode.")
if img_depth == 8:
x /= 255.
else:
x /= 65535.
if color_mode == "rgb":
assert img_depth == 8, (
f"RGB images only support 8-bit depth, got {img_depth}, "
"please check `model.input_image_depth` in spec file"
)
mean = [0.485, 0.456, 0.406]
std = [0.224, 0.224, 0.224]
elif color_mode == "grayscale":
mean = [0.449]
std = [0.224]
else:
raise NotImplementedError(f"Invalid color mode: {color_mode}")
else:
if color_mode == "rgb":
assert img_depth == 8, (
f"RGB images only support 8-bit depth, got {img_depth}, "
"please check `model.input_image_depth` in spec file"
)
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
if not img_mean:
mean = [103.939, 116.779, 123.68]
else:
assert len(img_mean) == 3, "image_mean must be a list of 3 values \
for RGB input."
mean = img_mean
std = None
else:
if not img_mean:
if img_depth == 8:
mean = [117.3786]
else:
# 117.3786 * 256
mean = [30048.9216]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
for idx in range(len(mean)):
if x.ndim == 3:
x[idx, :, :] -= mean[idx]
if std is not None:
x[idx, :, :] /= std[idx]
else:
x[:, idx, :, :] -= mean[idx]
if std is not None:
x[:, idx, :, :] /= std[idx]
else:
for idx in range(len(mean)):
x[..., idx] -= mean[idx]
if std is not None:
x[..., idx] /= std[idx]
return x
def _preprocess_symbolic_input(x, data_format, mode, color_mode, img_mean, img_depth=8, **kwargs):
"""Preprocesses a tensor encoding a batch of images.
# Arguments
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor.
"""
global _IMAGENET_MEAN # noqa pylint: disable=global-statement
assert img_depth in [8, 16], (
f"Unsupported image depth: {img_depth}, should be 8 or 16, "
"please check `model.input_image_depth` in spec file"
)
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if mode == 'tf':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in tf mode.")
if img_depth == 8:
x /= 127.5
else:
x /= 32767.5
x -= 1.
return x
if mode == 'torch':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in torch mode.")
if img_depth == 8:
x /= 255.
else:
x /= 65535.
if color_mode == "rgb":
assert img_depth == 8, (
f"RGB images only support 8-bit depth, got {img_depth}, "
"please check `model.input_image_depth` in spec file"
)
mean = [0.485, 0.456, 0.406]
std = [0.224, 0.224, 0.224]
elif color_mode == "grayscale":
mean = [0.449]
std = [0.224]
else:
raise NotImplementedError(f"Invalid color mode: {color_mode}")
else:
if color_mode == "rgb":
assert img_depth == 8, (
f"RGB images only support 8-bit depth, got {img_depth}, "
"please check `model.input_image_depth` in spec file"
)
if data_format == 'channels_first':
# 'RGB'->'BGR'
if backend.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
if not img_mean:
mean = [103.939, 116.779, 123.68]
else:
assert len(img_mean) == 3, "image_mean must be a list of 3 values \
for RGB input."
mean = img_mean
std = None
else:
if not img_mean:
if img_depth == 8:
mean = [117.3786]
else:
# 117.3786 * 256
mean = [30048.9216]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
std = None
if _IMAGENET_MEAN is None:
_IMAGENET_MEAN = backend.constant(-np.array(mean))
# Zero-center by mean pixel
if backend.dtype(x) != backend.dtype(_IMAGENET_MEAN):
x = backend.bias_add(
x, backend.cast(_IMAGENET_MEAN, backend.dtype(x)),
data_format=data_format)
else:
x = backend.bias_add(x, _IMAGENET_MEAN, data_format)
if std is not None:
x /= std
return x
def preprocess_input(x, data_format=None, mode='caffe', color_mode="rgb", img_mean=None, img_depth=8, **kwargs):
"""Preprocesses a tensor or Numpy array encoding a batch of images.
# Arguments
x: Input Numpy or symbolic tensor, 3D or 4D.
The preprocessed data is written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Data format of the image tensor/array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor or Numpy array.
# Raises
ValueError: In case of unknown `data_format` argument.
"""
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(x, data_format=data_format,
mode=mode, color_mode=color_mode,
img_mean=img_mean,
img_depth=img_depth, **kwargs)
return _preprocess_symbolic_input(x, data_format=data_format,
mode=mode, color_mode=color_mode,
img_mean=img_mean,
img_depth=img_depth, **kwargs)
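# Illustrative usage (a sketch with assumed shapes; not part of the original module):
# in "torch" mode an 8-bit RGB batch is scaled to [0, 1] and then normalized with the
# per-channel mean/std defined above.
#
#   dummy = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
#   out = preprocess_input(dummy, data_format="channels_first", mode="torch", color_mode="rgb")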
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/preprocess_input.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitiy module for classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch keras.utils.image_utils.load_img() with cropping support."""
import random
try:
from PIL import Image as pil_image
try:
pil_image_resampling = pil_image.Resampling
except AttributeError:
pil_image_resampling = pil_image
except ImportError:
pil_image = None
pil_image_resampling = None
if pil_image_resampling is not None:
_PIL_INTERPOLATION_METHODS = {
"nearest": pil_image_resampling.NEAREST,
"bilinear": pil_image_resampling.BILINEAR,
"bicubic": pil_image_resampling.BICUBIC,
"hamming": pil_image_resampling.HAMMING,
"box": pil_image_resampling.BOX,
"lanczos": pil_image_resampling.LANCZOS,
}
import keras
import tensorflow as tf
from nvidia_tao_tf2.cv.classification.utils.helper import color_augmentation
# Padding size.
# We first resize to (target_width + CROP_PADDING, target_height + CROP_PADDING),
# then crop to (target_width, target_height).
# For the standard ImageNet size of 224x224 the crop ratio is 0.875 (224 / (224 + 32)),
# but EfficientNet B1-B7 use larger resolutions, so this ratio
# is no longer 0.875.
# https://github.com/tensorflow/tpu/blob/r1.15/models/official/efficientnet/preprocessing.py#L110
CROP_PADDING = 32
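# Worked example (illustrative arithmetic only): with CROP_PADDING = 32, a 224x224
# target is cropped from a 256-pixel short side, i.e. a crop fraction of
# 224 / (224 + 32) = 0.875; a hypothetical 300x300 target would instead give
# 300 / (300 + 32) ~= 0.904, so the fraction grows with the target resolution.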
COLOR_AUGMENTATION = False
def _set_color_augmentation(flag):
global COLOR_AUGMENTATION # pylint: disable=global-statement
COLOR_AUGMENTATION = flag
def load_and_crop_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest', keep_aspect_ratio=False):
"""Wraps keras_preprocessing.image.utils.load_img() and adds cropping.
Cropping method enumarated in interpolation
# Arguments
path: Path to image file.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
The desired image format.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation and crop methods used to resample and crop the image
if the target size is different from that of the loaded image.
Methods are delimited by ":" where first part is interpolation and second is crop
e.g. "lanczos:random".
Supported interpolation methods are "nearest", "bilinear", "bicubic", "lanczos",
"box", "hamming". By default, "nearest" is used.
Supported crop methods are "none", "center", "random".
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
# Decode interpolation string. Allowed Crop methods: none, center, random
interpolation, crop = interpolation.split(":") \
if ":" in interpolation else (interpolation, "none")
if crop == "none":
return tf.keras.preprocessing.image.load_img(
path,
grayscale=grayscale,
color_mode=color_mode,
target_size=target_size,
interpolation=interpolation)
# Load original size image using Keras
img = tf.keras.preprocessing.image.load_img(
path,
grayscale=grayscale,
color_mode=color_mode,
target_size=None,
interpolation=interpolation)
# Crop to a fraction of the total image
if target_size is not None:
target_width = target_size[1]
target_height = target_size[0]
if img.size != (target_width, target_height):
if crop not in ["center", "random"]:
raise ValueError(f'Invalid crop method {crop} specified.')
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported ' # noqa pylint: disable=C0209
'methods are {}'.format(
interpolation,
", ".join(
_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
width, height = img.size
if crop == 'random':
# Resize keeping aspect ratio
# result should be no smaller than the target size, including the crop fraction overhead
crop_fraction = random.uniform(0.45, 1.0)
target_size_before_crop = (
target_width / crop_fraction,
target_height / crop_fraction
)
ratio = max(
target_size_before_crop[0] / width,
target_size_before_crop[1] / height
)
target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)
img = img.resize(target_size_before_crop_keep_ratio, resample=resample)
if crop == 'center':
# Resize keeping aspect ratio
# result should be no smaller than the target size, including the CROP_PADDING overhead
target_size_before_crop = (
target_width + CROP_PADDING,
target_height + CROP_PADDING
)
ratio = max(
target_size_before_crop[0] / width,
target_size_before_crop[1] / height
)
target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)
img = img.resize(target_size_before_crop_keep_ratio, resample=resample)
width, height = img.size
if crop == "center":
left_corner = int(round(width / 2)) - int(round(target_width / 2))
top_corner = int(round(height / 2)) - int(round(target_height / 2))
return img.crop(
(left_corner,
top_corner,
left_corner + target_width,
top_corner + target_height))
if crop == "random":
# random crop
left_shift = random.randint(0, int((width - target_width)))
down_shift = random.randint(0, int((height - target_height)))
img = img.crop(
(left_shift,
down_shift,
target_width + left_shift,
target_height + down_shift))
# color augmentation
if COLOR_AUGMENTATION and img.mode == "RGB":
return color_augmentation(img)
return img
raise ValueError("Crop mode not supported.")
return img
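# Illustrative call (hypothetical path; a sketch, not part of the original module):
#
#   img = load_and_crop_img("/data/sample.jpg", target_size=(224, 224),
#                           interpolation="bicubic:center")
#
# "bicubic" selects the PIL resampling filter and "center" selects the crop method.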
# Monkey patch for TF2
keras.utils.image_utils.load_img = load_and_crop_img
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/preprocess_crop.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixup augmentation."""
import numpy as np
class MixupImageDataGenerator():
"""Mixup image generator."""
def __init__(
self, generator, directory, batch_size,
img_height, img_width, color_mode="rgb",
interpolation="bilinear", alpha=0.2
):
"""Constructor for mixup image data generator.
Arguments:
generator (object): An instance of Keras ImageDataGenerator.
directory (str): Image directory.
batch_size (int): Batch size.
img_height (int): Image height in pixels.
img_width (int): Image width in pixels.
color_mode (string): Color mode of images.
interpolation (string): Interpolation method for resize.
alpha (float): Mixup beta distribution alpha parameter (default: 0.2).
"""
self.batch_index = 0
self.batch_size = batch_size
self.alpha = alpha
# First iterator yielding tuples of (x, y)
self.generator = generator.flow_from_directory(
directory,
target_size=(img_height, img_width),
color_mode=color_mode,
batch_size=self.batch_size,
interpolation=interpolation,
shuffle=True,
class_mode='categorical'
)
# Number of images across all classes in image directory.
self.num_samples = self.generator.samples
def reset_index(self):
"""Reset the generator indexes array."""
self.generator._set_index_array()
def on_epoch_end(self):
"""reset index on epoch end."""
self.reset_index()
def reset(self):
"""reset."""
self.batch_index = 0
def __len__(self):
"""length."""
return (self.num_samples + self.batch_size - 1) // self.batch_size
def get_steps_per_epoch(self):
"""Get number of steps per epoch."""
return self.num_samples // self.batch_size
def __next__(self):
"""Get next batch input/output pair.
Returns:
tuple -- batch of input/output pair, (inputs, outputs).
"""
if self.batch_index == 0:
self.reset_index()
current_index = (self.batch_index * self.batch_size) % self.num_samples
if self.num_samples > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
# random sample the lambda value from beta distribution.
if self.alpha > 0:
# Get a pair of inputs and outputs from the batch and its reversed batch.
X1, y1 = self.generator.next()
# in case the dataset has some garbage, the real batch size
# might be smaller than self.batch_size
_l = np.random.beta(self.alpha, self.alpha, X1.shape[0])
_l = np.maximum(_l, 1.0 - _l)
X_l = _l.reshape(X1.shape[0], 1, 1, 1)
y_l = _l.reshape(X1.shape[0], 1)
X2, y2 = np.flip(X1, 0), np.flip(y1, 0)
# Perform the mixup.
X = X1 * X_l + X2 * (1 - X_l)
y = y1 * y_l + y2 * (1 - y_l)
else:
# alpha == 0 essentially disable mixup
X, y = self.generator.next()
return X, y
def __iter__(self):
"""iterator."""
return self
def next(self):
"""Next item."""
return next(self)
@property
def num_classes(self):
"""number of classes."""
return self.generator.num_classes
@property
def class_indices(self):
"""class indices."""
return self.generator.class_indices
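# Minimal usage sketch (assumed directory layout and parameter values; not part of
# the original module). Each yielded batch is blended with a per-sample lambda drawn
# from Beta(alpha, alpha):
#
#   from tensorflow.keras.preprocessing.image import ImageDataGenerator
#   datagen = ImageDataGenerator(horizontal_flip=True, data_format="channels_first")
#   mixup_iter = MixupImageDataGenerator(datagen, "/data/train", batch_size=32,
#                                        img_height=224, img_width=224, alpha=0.2)
#   x_batch, y_batch = next(mixup_iter)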
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/mixup_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of helper functions."""
import os
import cv2
import importlib
import json
import warnings
from tensorflow import keras
import tensorflow as tf
from numba import errors
from numba import jit, njit
import numpy as np
from PIL import Image
import tempfile
import zipfile
from eff.core import Archive, File
from eff.callbacks import BinaryContentCallback
from nvidia_tao_tf2.common.utils import (
CUSTOM_OBJS,
MultiGPULearningRateScheduler,
SoftStartCosineAnnealingScheduler,
StepLRScheduler
)
warnings.filterwarnings(action="ignore", category=UserWarning)
warnings.simplefilter('ignore', category=errors.NumbaWarning)
warnings.simplefilter('ignore', category=errors.NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=errors.NumbaPendingDeprecationWarning)
opt_dict = {
'sgd': keras.optimizers.legacy.SGD,
'adam': keras.optimizers.legacy.Adam,
'rmsprop': keras.optimizers.legacy.RMSprop
}
scope_dict = {'dense': keras.layers.Dense,
'conv2d': keras.layers.Conv2D}
regularizer_dict = {'l1': keras.regularizers.l1,
'l2': keras.regularizers.l2}
def initialize():
"""Initializes backend related initializations."""
if tf.config.list_physical_devices('GPU'):
data_format = 'channels_first'
else:
data_format = 'channels_last'
tf.keras.backend.set_image_data_format(data_format)
def build_optimizer(optimizer_config):
"""build optimizer with the optimizer config."""
if optimizer_config.optimizer == "sgd":
return opt_dict["sgd"](
learning_rate=optimizer_config.lr,
momentum=optimizer_config.momentum,
decay=optimizer_config.decay,
nesterov=optimizer_config.nesterov
)
if optimizer_config.optimizer == "adam":
return opt_dict["adam"](
learning_rate=optimizer_config.lr,
beta_1=optimizer_config.beta_1,
beta_2=optimizer_config.beta_2,
epsilon=optimizer_config.epsilon,
decay=optimizer_config.decay
)
if optimizer_config.optimizer == "rmsprop":
return opt_dict["rmsprop"](
learning_rate=optimizer_config.lr,
rho=optimizer_config.rho,
epsilon=optimizer_config.epsilon,
decay=optimizer_config.decay
)
raise ValueError(f"Unsupported Optimizer: {optimizer_config.optimizer}")
def build_lr_scheduler(lr_config, hvd_size, max_iterations):
"""Build a learning rate scheduler from config."""
# Set up the learning rate callback. It will modulate learning rate
# based on iteration progress to reach max_iterations.
if lr_config.scheduler == 'step':
lrscheduler = StepLRScheduler(
base_lr=lr_config.learning_rate * hvd_size,
gamma=lr_config.gamma,
step_size=lr_config.step_size,
max_iterations=max_iterations
)
elif lr_config.scheduler == 'soft_anneal':
lrscheduler = MultiGPULearningRateScheduler(
base_lr=lr_config.learning_rate * hvd_size,
soft_start=lr_config.soft_start,
annealing_points=lr_config.annealing_points,
annealing_divider=lr_config.annealing_divider,
max_iterations=max_iterations
)
elif lr_config.scheduler == 'cosine':
lrscheduler = SoftStartCosineAnnealingScheduler(
base_lr=lr_config.learning_rate * hvd_size,
min_lr_ratio=lr_config.min_lr_ratio,
soft_start=lr_config.soft_start,
max_iterations=max_iterations
)
else:
raise ValueError(
f"Only `step`, `cosine` and `soft_anneal` LR schedulers are supported, but {lr_config.scheduler} was specified."
)
return lrscheduler
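# Illustrative config (a sketch; field names are taken from the branches above, the
# values are assumptions):
#
#   lr_config.scheduler = 'cosine'
#   lr_config.learning_rate = 1e-3
#   lr_config.min_lr_ratio = 0.01
#   lr_config.soft_start = 0.1
#   lr_scheduler = build_lr_scheduler(lr_config, hvd_size=1, max_iterations=10000)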
def get_input_shape(model, data_format):
"""Obtain input shape from a Keras model."""
# Computing shape of input tensor
image_shape = model.layers[0].input_shape[0][1:4]
# Setting input shape
if data_format == "channels_first":
nchannels, image_height, image_width = image_shape[0:3]
else:
image_height, image_width, nchannels = image_shape[0:3]
return image_height, image_width, nchannels
@njit
def randu(low, high):
"""standard uniform distribution."""
return np.random.random() * (high - low) + low
@jit
def random_hue(img, max_delta=10.0):
"""Rotates the hue channel.
Args:
img: input image in float32
max_delta: Max number of degrees to rotate the hue channel
"""
# Rotates the hue channel by delta degrees
delta = randu(-max_delta, max_delta)
hsv = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
hchannel = hsv[:, :, 0]
hchannel = delta + hchannel
# hue should always be within [0,360]
idx = np.where(hchannel > 360)
hchannel[idx] = hchannel[idx] - 360
idx = np.where(hchannel < 0)
hchannel[idx] = hchannel[idx] + 360
hsv[:, :, 0] = hchannel
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
@jit
def random_saturation(img, max_shift):
"""random saturation data augmentation."""
hsv = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
shift = randu(-max_shift, max_shift)
# saturation should always be within [0,1.0]
hsv[:, :, 1] = np.clip(hsv[:, :, 1] + shift, 0.0, 1.0)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
@jit
def random_contrast(img, center, max_contrast_scale):
"""random contrast data augmentation."""
new_img = (img - center) * (1.0 + randu(-max_contrast_scale, max_contrast_scale)) + center
new_img = np.clip(new_img, 0., 1.)
return new_img
@jit
def random_shift(x_img, shift_stddev):
"""random shift data augmentation."""
shift = np.random.randn() * shift_stddev
new_img = np.clip(x_img + shift, 0.0, 1.0)
return new_img
def color_augmentation(
x_img,
color_shift_stddev=0.0,
hue_rotation_max=25.0,
saturation_shift_max=0.2,
contrast_center=0.5,
contrast_scale_max=0.1
):
"""color augmentation for images."""
# convert PIL Image to numpy array
x_img = np.array(x_img, dtype=np.float32)
# normalize the image to (0, 1)
x_img /= 255.0
x_img = random_shift(x_img, color_shift_stddev)
x_img = random_hue(x_img, max_delta=hue_rotation_max)
x_img = random_saturation(x_img, saturation_shift_max)
x_img = random_contrast(
x_img,
contrast_center,
contrast_scale_max
)
# convert back to PIL Image
x_img *= 255.0
return Image.fromarray(x_img.astype(np.uint8), "RGB")
def setup_config(model, reg_config, bn_config=None, custom_objs=None):
"""Wrapper for setting up BN and regularizer.
Args:
model (keras Model): a Keras model
reg_config (dict): reg_config dict
bn_config (dict): config to override BatchNormalization parameters
custom_objs (dict): Custom objects for serialization and deserialization.
Return:
A new model with overridden config.
"""
if bn_config is not None:
bn_momentum = bn_config['momentum']
bn_epsilon = bn_config['epsilon']
else:
bn_momentum = 0.9
bn_epsilon = 1e-5
# Obtain the current configuration from model
mconfig = model.get_config()
# Obtain type and scope of the regularizer
reg_type = reg_config['type'].lower()
scope_list = reg_config['scope']
layer_list = [scope_dict[i.lower()] for i in scope_list if i.lower()
in scope_dict]
for layer, layer_config in zip(model.layers, mconfig['layers']):
# BN settings
if type(layer) == keras.layers.BatchNormalization:
layer_config['config']['momentum'] = bn_momentum
layer_config['config']['epsilon'] = bn_epsilon
# Regularizer settings
if reg_type:
if type(layer) in layer_list and \
hasattr(layer, 'kernel_regularizer'):
assert reg_type in ['l1', 'l2', 'none'], \
"Regularizer can only be either L1, L2 or None."
if reg_type in ['l1', 'l2']:
assert 0 < reg_config['weight_decay'] < 1, \
"Weight decay should be greater than 0 and less than 1."
regularizer = regularizer_dict[reg_type](
reg_config['weight_decay'])
layer_config['config']['kernel_regularizer'] = \
{'class_name': regularizer.__class__.__name__,
'config': regularizer.get_config()}
if reg_type == 'none':
layer_config['config']['kernel_regularizer'] = None
if custom_objs:
CUSTOM_OBJS.update(custom_objs)
with keras.utils.CustomObjectScope(CUSTOM_OBJS):
updated_model = keras.models.Model.from_config(mconfig)
updated_model.set_weights(model.get_weights())
return updated_model
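# Illustrative configs (a sketch; key names mirror the lookups above, the values
# are assumptions):
#
#   reg_config = {'type': 'L2', 'scope': ['conv2d', 'dense'], 'weight_decay': 1e-5}
#   bn_config = {'momentum': 0.9, 'epsilon': 1e-5}
#   model = setup_config(model, reg_config, bn_config=bn_config)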
def decode_eff(eff_model_path, enc_key=None):
"""Decode EFF to saved_model directory.
Args:
eff_model_path (str): Path to eff model
enc_key (str, optional): Encryption key. Defaults to None.
Returns:
str: Path to the saved_model
"""
# Decrypt EFF
eff_filename = os.path.basename(eff_model_path)
eff_art = Archive.restore_artifact(
restore_path=eff_model_path,
artifact_name=eff_filename,
passphrase=enc_key)
zip_path = eff_art.get_handle()
# Unzip
saved_model_path = os.path.dirname(zip_path)
# TODO(@yuw): try catch
with zipfile.ZipFile(zip_path, "r") as zip_file:
zip_file.extractall(saved_model_path)
return saved_model_path
def deserialize_custom_layers(art):
"""Deserialize the code for custom layer from EFF.
Args:
art (eff.core.artifact.Artifact): Artifact restored from EFF Archive.
Returns:
final_dict (dict): Dictionary representing CUSTOM_OBJS used in the EFF stored Keras model.
"""
# Get class.
source_code = art.get_content()
spec = importlib.util.spec_from_loader('helper', loader=None)
helper = importlib.util.module_from_spec(spec)
exec(source_code, helper.__dict__) # noqa pylint: disable=W0122
final_dict = {}
# Get class name from attributes.
class_names = art["class_names"]
for cn in class_names:
final_dict[cn] = getattr(helper, cn)
return final_dict
def decode_tltb(eff_path, enc_key=None):
"""Restore Keras Model from EFF Archive.
Args:
eff_path (str): Path to the eff file.
enc_key (str): Key to load EFF file.
Returns:
model (keras.models.Model): Loaded keras model.
EFF_CUSTOM_OBJS (dict): Dictionary of custom layers from the eff file.
"""
model_name = os.path.basename(eff_path).split(".")[0]
with Archive.restore_from(restore_path=eff_path, passphrase=enc_key) as restored_effa:
EFF_CUSTOM_OBJS = deserialize_custom_layers(restored_effa.artifacts['custom_layers.py'])
model_name = restored_effa.metadata['model_name']
art = restored_effa.artifacts[f'{model_name}.hdf5']
weights, m = art.get_content()
m = json.loads(m)
with keras.utils.CustomObjectScope(EFF_CUSTOM_OBJS):
model = keras.models.model_from_config(m, custom_objects=EFF_CUSTOM_OBJS)
model.set_weights(weights)
result = {
"model": model,
"custom_objs": EFF_CUSTOM_OBJS,
"model_name": model_name
}
return result
def load_model(model_path, enc_key=None):
"""Load hdf5 or EFF model.
Args:
model_path (str): Path to hdf5 model or eff model
enc_key (str, optional): Encryption key. Defaults to None.
Returns:
Keras model: Loaded model
"""
assert os.path.exists(model_path), f"Pretrained model not found at {model_path}"
if model_path.endswith('.tlt'):
model_path = decode_eff(model_path, enc_key)
return tf.keras.models.load_model(model_path)
if model_path.endswith('.tltb'):
out_dict = decode_tltb(model_path, enc_key)
model = out_dict['model']
return model
return tf.keras.models.load_model(model_path, custom_objects=CUSTOM_OBJS)
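# Illustrative call (hypothetical path and key; a sketch, not part of the original module):
#
#   model = load_model("/workspace/pretrained/model.tlt", enc_key="my_key")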
def zipdir(src, zip_path):
"""Function creates zip archive from src in dst location.
Args:
src: Path to directory to be archived.
dst: Path where archived dir will be stored.
"""
# destination directory
os.chdir(os.path.dirname(zip_path))
# zipfile handler
with zipfile.ZipFile(zip_path, "w") as zf:
# writing content of src directory to the archive
for root, _, filenames in os.walk(src):
for filename in filenames:
zf.write(
os.path.join(root, filename),
arcname=os.path.join(root.replace(src, ""), filename))
def encode_eff(filepath, eff_model_path, enc_key):
"""Encode saved_model directory into a .tlt file.
Args:
filepath (str): Path to saved_model
eff_model_path (str): Path to the output EFF file
enc_key (str): Encryption key
"""
os_handle, temp_zip_file = tempfile.mkstemp()
os.close(os_handle)
# create zipfile from saved_model directory
zipdir(filepath, temp_zip_file)
# create artifacts from zipfile
eff_filename = os.path.basename(eff_model_path)
zip_art = File(
name=eff_filename,
description="Artifact from checkpoint",
filepath=temp_zip_file,
encryption=bool(enc_key),
content_callback=BinaryContentCallback,
)
Archive.save_artifact(
save_path=eff_model_path, artifact=zip_art, passphrase=enc_key)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/helper.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for processing config file to run classification pipelines."""
def spec_checker(cfg):
"""Hydra config checker."""
assert cfg.model.input_channels in [1, 3], "Invalid input image dimension."
assert cfg.model.input_height >= 16, "Image height should be greater than 15 pixels."
assert cfg.model.input_width >= 16, "Image width should be greater than 15 pixels."
assert cfg.model.input_image_depth in [8, 16], "Only 8-bit and 16-bit images are supported"
assert cfg.dataset.num_classes > 1, \
"Number of classes should be greater than 1. Consider adding a background class."
assert cfg.prune.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion options are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert cfg.prune.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/utils/config_utils.py |
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
"""Pipeline scripts for classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a classification model."""
import os
import logging
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.classification.export.classification_exporter import Exporter
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='classification', mode='export')
def run_export(cfg=None):
"""Export classification model to etlt."""
logger.setLevel(logging.INFO)
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
exporter = Exporter(config=cfg)
exporter.export()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="export", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for continuous training of classification application."""
cfg = update_results_dir(cfg, 'export')
run_export(cfg=cfg)
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference with classification tensorrt engine."""
import os
import logging
import numpy as np
from PIL import ImageFile
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.cv.classification.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
ImageFile.LOAD_TRUNCATED_IMAGES = True
logger = logging.getLogger(__name__)
SUPPORTED_IMAGE_FORMAT = ['.jpg', '.png', '.jpeg']
def run_inference(cfg):
"""Wrapper function to run evaluation of classification model.
Args:
Dictionary arguments containing parameters parsed in the main function.
"""
# Set up logger verbosity.
verbosity = 'INFO'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level=verbosity)
# set backend
# initialize()
predictions = []
inferencer = TRTInferencer(cfg['inference']['checkpoint'], batch_size=1,
data_format=cfg['data_format'],
img_depth=cfg['model']['input_image_depth'])
for img_name in os.listdir(cfg['inference']['image_dir']):
_, ext = os.path.splitext(img_name)
if ext.lower() in SUPPORTED_IMAGE_FORMAT:
result = inferencer.infer_single(
os.path.join(cfg['inference']['image_dir'], img_name))
# print(result)
predictions.append(np.argmax(result))
break
print(predictions)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="infer", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for continuous training of classification application."""
run_inference(cfg)
logger.info("Inference finished successfully.")
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/inference_trt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification training script with protobuf configuration."""
from functools import partial
import json
import logging
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image, ImageFile
import horovod.tensorflow.keras as hvd
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.common.mlops.utils import init_mlops
from nvidia_tao_tf2.common.utils import set_random_seed, update_results_dir
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.classification.model.classifier_module import ClassifierModule
from nvidia_tao_tf2.cv.classification.model.callback_builder import setup_callbacks
from nvidia_tao_tf2.cv.classification.trainer.classifier_trainer import ClassifierTrainer
from nvidia_tao_tf2.cv.classification.utils.config_utils import spec_checker
from nvidia_tao_tf2.cv.classification.utils.mixup_generator import MixupImageDataGenerator
from nvidia_tao_tf2.cv.classification.utils.preprocess_input import preprocess_input
from nvidia_tao_tf2.cv.classification.utils import preprocess_crop # noqa pylint: disable=unused-import
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = 9000000000
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', level='INFO')
logger = logging.getLogger(__name__)
def setup_env(cfg):
"""Setup training env."""
# Horovod: initialize Horovod.
hvd.init()
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
# Configure the logger.
logger.setLevel(logging.INFO)
# Set random seed.
seed = cfg.train.random_seed + hvd.rank()
set_random_seed(seed)
logger.debug("Random seed is set to %d", seed)
# Create results dir
if hvd.rank() == 0:
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
init_mlops(cfg, name='classification')
def load_data(train_data,
val_data,
cfg,
batch_size=8,
enable_random_crop=False,
enable_center_crop=False,
enable_color_augmentation=False,
interpolation='bicubic',
num_classes=1000,
mixup_alpha=0.0,
no_horizontal_flip=False,
data_format='channels_first'):
"""Load training and validation data with default data augmentation.
Args:
train_data (str): path to the training data.
val_data (str): path to the validation data.
cfg: Hydra experiment config (provides input shape, preprocess mode, image mean and depth).
batch_size (int): Number of image tensors per batch.
enable_random_crop (bool): Flag to enable random cropping in load_img.
enable_center_crop (bool): Flag to enable center cropping for val.
interpolation(str): Interpolation method for image resize. choices: `bilinear` or `bicubic`.
num_classes (int): Number of classes.
mixup_alpha (float): mixup alpha.
no_horizontal_flip(bool): Flag to disable horizontal flip for
direction-aware datasets.
Return:
train/val Iterators and number of classes in the dataset.
"""
image_depth = cfg.model.input_image_depth
color_mode = "rgb" if cfg.model.input_channels == 3 else "grayscale"
preprocessing_func = partial(
preprocess_input,
data_format=cfg.data_format,
mode=cfg.dataset.preprocess_mode,
img_mean=list(cfg.dataset.image_mean),
color_mode=color_mode,
img_depth=image_depth)
preprocess_crop._set_color_augmentation(enable_color_augmentation)
# set color augmentation properly for train.
# this global var will not affect validation dataset because
# the crop method is either "none" or "center" for val dataset,
# while this color augmentation is only possible for "random" crop.
# Initializing data generator : Train
train_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_func,
horizontal_flip=not no_horizontal_flip,
featurewise_center=False,
data_format=data_format)
train_iterator = MixupImageDataGenerator(
train_datagen, train_data, batch_size,
cfg.model.input_height, cfg.model.input_width,
color_mode=color_mode,
interpolation=interpolation + ':random' if enable_random_crop else interpolation,
alpha=mixup_alpha
)
logger.info('Processing dataset (train): %s', train_data)
# Initializing data generator: Val
val_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_func,
horizontal_flip=False,
data_format=data_format)
# Initializing data iterator: Val
val_iterator = val_datagen.flow_from_directory(
val_data,
target_size=(cfg.model.input_height, cfg.model.input_width),
color_mode=color_mode,
batch_size=batch_size,
interpolation=interpolation + ':center' if enable_center_crop else interpolation,
shuffle=False,
class_mode='categorical')
logger.info('Processing dataset (validation): %s', val_data)
# Check if the number of classes is consistent
assert train_iterator.num_classes == val_iterator.num_classes == num_classes, \
f"The number of classes in the training set ({train_iterator.num_classes})" + \
f"or validation set ({val_iterator.num_classes}) doesn't match num_classes ({num_classes})."
return train_iterator, val_iterator
@monitor_status(name='classification', mode='training')
def run_experiment(cfg):
"""Launch training experiment."""
spec_checker(cfg)
# Load augmented data
train_iterator, val_iterator = load_data(
cfg.dataset.train_dataset_path,
cfg.dataset.val_dataset_path,
cfg=cfg,
batch_size=cfg.train.batch_size_per_gpu,
enable_random_crop=cfg.dataset.augmentation.enable_random_crop,
enable_center_crop=cfg.dataset.augmentation.enable_center_crop,
enable_color_augmentation=cfg.dataset.augmentation.enable_color_augmentation,
interpolation=cfg.model.resize_interpolation_method,
num_classes=cfg.dataset.num_classes,
mixup_alpha=cfg.dataset.augmentation.mixup_alpha,
no_horizontal_flip=cfg.dataset.augmentation.disable_horizontal_flip,
data_format=cfg.data_format)
# Initialize classifier module
steps_per_epoch = (len(train_iterator) + hvd.size() - 1) // hvd.size()
classifier = ClassifierModule(cfg, steps_per_epoch)
# Setup callbacks
callbacks = setup_callbacks(
cfg.train.checkpoint_interval,
cfg.results_dir,
cfg.train.lr_config,
classifier.initial_epoch + 1,
classifier.steps_per_epoch,
cfg.train.num_epochs,
cfg.encryption_key)
# Writing out class-map file for inference mapping
if hvd.rank() == 0:
with open(os.path.join(cfg.results_dir, "classmap.json"), "w", encoding='utf-8') as f:
json.dump(train_iterator.class_indices, f)
logger.info('classmap.json is generated at %s', cfg.results_dir)
# Initialize classifier trainer
trainer = ClassifierTrainer(
num_epochs=cfg.train.num_epochs,
callbacks=callbacks,
cfg=cfg)
trainer.fit(
module=classifier,
train_dataset=train_iterator,
eval_dataset=val_iterator,
verbose=1 if hvd.rank() == 0 else 0
)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="train", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for continuous training of classification application."""
cfg = update_results_dir(cfg, 'train')
setup_env(cfg)
run_experiment(cfg=cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the classification TAO model."""
import logging
import os
import tempfile
import tensorflow as tf
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.common.utils import get_model_file_size, update_results_dir
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.classification.pruner.pruner import ClassificationPruner
from nvidia_tao_tf2.cv.classification.utils.helper import encode_eff
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.get_logger().setLevel('ERROR')
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='classification', mode='pruning')
def run_pruning(cfg):
"""Prune an encrypted Keras model."""
logger.setLevel(logging.INFO)
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
pruner = ClassificationPruner(cfg)
# Pruning trained model
pruned_model = pruner.prune(
threshold=cfg.prune.threshold,
excluded_layers=list(cfg.prune.excluded_layers))
# Save the encrypted pruned model
tmp_saved_model = tempfile.mkdtemp()
pruned_model.save(tmp_saved_model)
# Convert to EFF
output_path = os.path.join(
cfg.prune.results_dir,
f'model_th={cfg.prune.threshold}_eq={cfg.prune.equalization_criterion}.tlt')
encode_eff(tmp_saved_model, output_path, cfg.encryption_key)
# Print a summary of the pruned model
logger.info("Model summary of the pruned model:")
pruned_model.summary(print_fn=logger.info)
pruning_ratio = pruned_model.count_params() / pruner.model.count_params()
logger.info("Pruning ratio (pruned model / original model): %s", pruning_ratio)
status_logging.get_status_logger().kpi.update(
{'pruning_ratio': float(pruning_ratio),
'param_count': pruned_model.count_params(),
'size': get_model_file_size(output_path)})
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="prune", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for classification pruning."""
cfg = update_results_dir(cfg, 'prune')
run_pruning(cfg=cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference and metrics computation code using a loaded model."""
import json
import logging
import os
import tqdm
import numpy as np
import pandas as pd
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.classification.inferencer.keras_inferencer import KerasInferencer
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', level='INFO')
logger = logging.getLogger(__name__)
SUPPORTED_IMAGE_FORMAT = ['.jpg', '.png', '.jpeg']
@monitor_status(name='classification', mode='inference')
def run_inference(cfg):
"""Inference on a directory of images using a pretrained model file.
Args:
cfg: Hydra config.
Log:
Directory Mode:
write out a .csv file to store all the predictions
"""
logger.setLevel(logging.INFO)
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
result_csv_path = os.path.join(cfg.results_dir, 'result.csv')
with open(cfg.inference.classmap, "r", encoding='utf-8') as cm:
class_dict = json.load(cm)
reverse_mapping = {v: k for k, v in class_dict.items()}
image_depth = cfg.model.input_image_depth
assert image_depth in [8, 16], "Only 8-bit and 16-bit images are supported"
interpolation = cfg.model.resize_interpolation_method
if cfg.dataset.augmentation.enable_center_crop:
interpolation += ":center"
inferencer = KerasInferencer(
cfg.inference.checkpoint,
key=cfg.encryption_key,
img_mean=list(cfg.dataset.image_mean),
preprocess_mode=cfg.dataset.preprocess_mode,
interpolation=interpolation,
img_depth=image_depth,
data_format=cfg.data_format)
predictions = []
imgpath_list = [os.path.join(root, filename)
for root, subdirs, files in os.walk(cfg.inference.image_dir)
for filename in files
if os.path.splitext(filename)[1].lower()
in SUPPORTED_IMAGE_FORMAT
]
for img_name in tqdm.tqdm(imgpath_list):
raw_predictions = inferencer.infer_single(img_name)
class_index = np.argmax(raw_predictions)
class_labels = reverse_mapping[class_index]
class_conf = np.max(raw_predictions)
predictions.append((img_name, class_labels, class_conf))
with open(result_csv_path, 'w', encoding='utf-8') as csv_f:
# Write predictions to file
df = pd.DataFrame(predictions)
df.to_csv(csv_f, header=False, index=False)
logger.info("The inference result is saved at: %s", result_csv_path)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="infer", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for classification inference."""
cfg = update_results_dir(cfg, 'inference')
run_inference(cfg)
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform classification evaluation."""
import os
from functools import partial
import logging
import json
import sys
import numpy as np
from PIL import ImageFile
from sklearn.metrics import classification_report, confusion_matrix
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.classification.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.classification.utils import preprocess_crop # noqa pylint: disable=unused-import
from nvidia_tao_tf2.cv.classification.utils.preprocess_input import preprocess_input
from nvidia_tao_tf2.cv.classification.utils.helper import get_input_shape, load_model
ImageFile.LOAD_TRUNCATED_IMAGES = True
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='classification', mode='evaluation')
def run_evaluate(cfg):
"""Wrapper function to run evaluation of classification model.
Args:
        cfg: Hydra config containing the evaluation parameters parsed in the main function.
"""
# Set up logger verbosity.
logger.setLevel(logging.INFO)
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
# Decrypt EFF
final_model = load_model(
str(cfg.evaluate.checkpoint),
cfg.encryption_key)
# Defining optimizer
opt = tf.keras.optimizers.legacy.SGD(lr=0, decay=1e-6, momentum=0.9, nesterov=False)
    # Define top-k accuracy metric
topk_acc = partial(tf.keras.metrics.top_k_categorical_accuracy,
k=cfg['evaluate']['top_k'])
topk_acc.__name__ = 'topk_acc'
# Compile model
final_model.compile(loss='categorical_crossentropy',
metrics=[topk_acc],
optimizer=opt)
# print model summary
final_model.summary()
# Get input shape
image_height, image_width, nchannels = get_input_shape(final_model, cfg.data_format)
image_depth = cfg['model']['input_image_depth']
assert image_depth in [8, 16], "Only 8-bit and 16-bit images are supported"
assert nchannels in [1, 3], (
f"Unsupported channel count {nchannels} for evaluation"
)
logger.debug('input req HWC and depth: %s, %s, %s, %s', image_height, image_width, nchannels, image_depth)
color_mode = "rgb"
if nchannels == 1:
color_mode = "grayscale"
interpolation = cfg.model.resize_interpolation_method
if cfg.dataset.augmentation.enable_center_crop:
interpolation += ":center"
# Initializing data generator
target_datagen = ImageDataGenerator(
preprocessing_function=partial(preprocess_input,
data_format=cfg.data_format,
mode=cfg.dataset.preprocess_mode,
img_mean=list(cfg.dataset.image_mean),
color_mode=color_mode,
img_depth=image_depth),
horizontal_flip=False,
data_format=cfg['data_format'])
if cfg.evaluate.classmap:
# If classmap is provided, then we explicitly set it in ImageDataGenerator
with open(cfg.evaluate.classmap, "r", encoding='utf-8') as cmap_file:
try:
data = json.load(cmap_file)
except json.decoder.JSONDecodeError as e:
print(f"Loading the {cfg.evaluate.classmap} failed with error\n{e}")
sys.exit(-1)
            except Exception as e:
                output = getattr(e, "output", e)
                print(f"Evaluation failed with error {output}")
                sys.exit(-1)
if not data:
class_names = None
logger.info('classmap is not loaded.')
else:
class_names = [""] * len(list(data.keys()))
if not all([class_index < len(class_names) and isinstance(class_index, int) # noqa pylint: disable=R1729
for class_index in data.values()]):
raise RuntimeError(
"Invalid data in the json file. The class index must "
"be < number of classes and an integer value.")
for class_name, class_index in data.items():
class_names[class_index] = class_name
logger.info('classmap is loaded successfully.')
else:
class_names = None
# Initializing data iterator
target_iterator = target_datagen.flow_from_directory(
cfg['evaluate']['dataset_path'],
target_size=(image_height, image_width),
color_mode=color_mode,
batch_size=cfg['evaluate']['batch_size'],
class_mode='categorical',
interpolation=interpolation,
shuffle=False)
logger.info('Processing dataset (evaluation): {}'.format(cfg['evaluate']['dataset_path'])) # noqa pylint: disable=C0209
nclasses = target_iterator.num_classes
assert nclasses > 1, "Invalid number of classes in the evaluation dataset."
# If number of classes does not match the new data
assert nclasses == final_model.output.get_shape().as_list()[-1], \
"The number of classes of the loaded model doesn't match the \
number of classes in the evaluation dataset."
# Evaluate the model on the full data set.
score = final_model.evaluate(target_iterator,
steps=len(target_iterator),
workers=cfg['evaluate']['n_workers'],
use_multiprocessing=False)
logger.info('Evaluation Loss: %s', score[0])
logger.info('Evaluation Top %s accuracy: %s', cfg['evaluate']['top_k'], score[1])
status_logging.get_status_logger().kpi['loss'] = float(score[0])
status_logging.get_status_logger().kpi['top_k'] = float(score[1])
# Re-initializing data iterator
target_iterator = target_datagen.flow_from_directory(
cfg['evaluate']['dataset_path'],
target_size=(image_height, image_width),
batch_size=cfg['evaluate']['batch_size'],
color_mode=color_mode,
class_mode='categorical',
interpolation=interpolation,
shuffle=False)
logger.info("Calculating per-class P/R and confusion matrix. It may take a while...")
Y_pred = final_model.predict_generator(target_iterator, len(target_iterator), workers=1)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(target_iterator.classes, y_pred))
print('Classification Report')
class_dict = target_iterator.class_indices
target_keys_names = list(sorted(class_dict.items(), key=lambda x: x[1]))
target_keys_names = list(zip(*target_keys_names))
print(classification_report(target_iterator.classes, y_pred, labels=target_keys_names[1], target_names=target_keys_names[0]))
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="eval", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for continuous training of classification application."""
cfg = update_results_dir(cfg, 'evaluate')
run_evaluate(cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for classification."""
import argparse
from nvidia_tao_tf2.cv.classification import scripts
from nvidia_tao_tf2.common.entrypoint.entrypoint import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"classification",
add_help=True,
description="Train Adapt Optimize Toolkit entrypoint for classification"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, task="classification_tf2")
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/entrypoint/classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for classification."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO classification model builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras.layers import AveragePooling2D, Dense, Flatten
from tensorflow.keras.layers import Activation, Input
from tensorflow.keras.models import Model
from nvidia_tao_tf2.backbones.efficientnet_tf import (
EfficientNetB0,
EfficientNetB1,
EfficientNetB2,
EfficientNetB3,
EfficientNetB4,
EfficientNetB5,
EfficientNetB6,
EfficientNetB7
)
from nvidia_tao_tf2.backbones.resnet_tf import ResNet
from nvidia_tao_tf2.backbones.mobilenet_tf import MobileNet, MobileNetV2
from nvidia_tao_tf2.cv.classification.utils.helper import decode_tltb
SUPPORTED_ARCHS = [
"resnet", "efficientnet-b0", "efficientnet-b1",
"efficientnet-b2", "efficientnet-b3",
"efficientnet-b4", "efficientnet-b5",
"efficientnet-b6", "efficientnet-b7",
"mobilenet_v1", "mobilenet_v2", "byom"
]
def add_dense_head(nclasses, base_model, data_format, kernel_regularizer, bias_regularizer):
"""Wrapper to add dense head to the backbone structure."""
output = base_model.output
output_shape = output.get_shape().as_list()
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
output = AveragePooling2D(pool_size=pool_size, name='avg_pool',
data_format=data_format, padding='valid')(output)
output = Flatten(name='flatten')(output)
# updated per TF2 documentation (https://www.tensorflow.org/guide/mixed_precision)
output = Dense(nclasses, name='predictions_dense',
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)(output)
output = Activation('softmax', dtype='float32', name='predictions')(output)
final_model = Model(inputs=base_model.input, outputs=output, name=base_model.name)
return final_model
def get_resnet(nlayers=18,
input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
all_projections=True,
use_batch_norm=True,
use_pooling=False,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
input_name="Input"):
"""Wrapper to get ResNet backbone."""
input_image = Input(shape=input_shape, name=input_name)
final_model = ResNet(nlayers=nlayers,
input_tensor=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
all_projections=all_projections,
use_pooling=use_pooling,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_byom(model_config_path=None,
input_shape=(3, 224, 224),
freeze_blocks=None,
passphrase=None,
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
input_name='Input'):
"""Wrapper to get Bring Your Own Model from json file."""
    # For BYOM, we don't have the code for the model architecture.
    # As a result, we must load it from the EFF file.
out_dict = decode_tltb(model_config_path, enc_key=passphrase)
final_model = out_dict['model']
# Rename the model to be more meaningful
final_model._name = out_dict['model_name']
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b0(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B0 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB0(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b1(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B1 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB1(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b2(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B2 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB2(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b3(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B3 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB3(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b4(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B4 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB4(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b5(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B5 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB5(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b6(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B6 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB6(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b7(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None,
input_name="Input"
):
"""Get an EfficientNet B7 model."""
input_image = Input(shape=input_shape, name=input_name)
final_model = EfficientNetB7(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_mobilenet(input_shape=None,
data_format='channels_first',
nclasses=1000,
use_batch_norm=None,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
stride=32,
input_name="Input"):
"""Wrapper to get MobileNet model from IVA templates."""
input_image = Input(shape=input_shape, name=input_name)
final_model = MobileNet(inputs=input_image,
input_shape=input_shape,
dropout=0.0,
add_head=retain_head,
stride=stride,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
nclasses=nclasses,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_mobilenet_v2(input_shape=None,
data_format='channels_first',
nclasses=1000,
use_batch_norm=None,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
all_projections=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
stride=32,
input_name="Input"):
"""Wrapper to get MobileNet V2 model from IVA templates."""
input_image = Input(shape=input_shape, name=input_name)
final_model = MobileNetV2(inputs=input_image,
input_shape=input_shape,
add_head=retain_head,
stride=stride,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
all_projections=all_projections,
nclasses=nclasses,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
# defining model dictionary
model_choose = {"resnet": get_resnet,
"efficientnet-b0": get_efficientnet_b0,
"efficientnet-b1": get_efficientnet_b1,
"efficientnet-b2": get_efficientnet_b2,
"efficientnet-b3": get_efficientnet_b3,
"efficientnet-b4": get_efficientnet_b4,
"efficientnet-b5": get_efficientnet_b5,
"efficientnet-b6": get_efficientnet_b6,
"efficientnet-b7": get_efficientnet_b7,
"mobilenet_v1": get_mobilenet,
"mobilenet_v2": get_mobilenet_v2,
"byom": get_byom}
def get_model(backbone="resnet_18",
input_shape=(3, 224, 224),
data_format=None,
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_blocks=None,
**kwargs):
"""Wrapper to choose feature extractor given backbone name."""
kwa = {}
if 'resnet' in backbone:
kwa['nlayers'] = int(backbone.split('_')[-1])
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_pooling'] = kwargs['use_pooling']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['all_projections'] = kwargs['all_projections']
backbone = backbone.split('_')[0]
elif 'efficientnet-b' in backbone:
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation_type']
elif 'mobilenet_v1' == backbone:
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
elif 'mobilenet_v2' == backbone:
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['all_projections'] = kwargs['all_projections']
elif 'byom' == backbone:
kwa['model_config_path'] = kwargs['model_config_path']
kwa['passphrase'] = kwargs['passphrase']
else:
raise ValueError(f'Unsupported architecture: {backbone}')
model = model_choose[backbone](
input_shape=input_shape,
nclasses=nclasses,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
retain_head=retain_head,
freeze_blocks=freeze_blocks,
input_name='Input',
**kwa)
return model
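def _example_get_model():
    """Hedged usage sketch of get_model(); all values below are illustrative assumptions.

    It is not called anywhere in the toolkit and only documents the extra keyword
    arguments that the 'resnet' branch above expects.
    """
    return get_model(
        backbone='resnet_18',
        input_shape=(3, 224, 224),
        data_format='channels_first',
        nclasses=10,
        retain_head=False,
        freeze_blocks=None,
        # kwargs consumed by the 'resnet' branch:
        use_batch_norm=True,
        use_pooling=True,
        freeze_bn=False,
        use_bias=False,
        all_projections=False)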
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/model/model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build models for classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification callback builder."""
import os
import horovod.tensorflow.keras as hvd
from wandb.keras import WandbCallback
from tensorflow.keras.callbacks import TensorBoard
from nvidia_tao_tf2.common.mlops.wandb import is_wandb_initialized
from nvidia_tao_tf2.cv.classification.callback.csv_callback import CSVLoggerWithStatus
from nvidia_tao_tf2.cv.classification.callback.eff_checkpoint import EffCheckpoint
from nvidia_tao_tf2.cv.classification.utils.helper import build_lr_scheduler
def setup_callbacks(ckpt_freq, results_dir, lr_config,
init_epoch, iters_per_epoch, max_epoch, key):
"""Setup callbacks: tensorboard, checkpointer, lrscheduler, csvlogger.
Args:
ckpt_freq (int): checkpoint and validation frequency.
results_dir (str): Path to a folder where various training outputs will
be written.
init_epoch (int): The number of epoch to resume training.
key (str): encryption key
Returns:
callbacks (list of keras.callbacks): list of callbacks.
"""
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback()]
max_iterations = iters_per_epoch * max_epoch
lrscheduler = build_lr_scheduler(lr_config, hvd.size(), max_iterations)
init_step = (init_epoch - 1) * iters_per_epoch
lrscheduler.reset(init_step)
callbacks.append(lrscheduler)
if hvd.rank() == 0:
# Set up the checkpointer.
save_weights_dir = results_dir # no longer in `weights` dir
if not os.path.exists(results_dir):
os.mkdir(results_dir)
if not os.path.exists(save_weights_dir):
os.makedirs(save_weights_dir)
# Save encrypted models
checkpointer = EffCheckpoint(save_weights_dir, key, verbose=0, ckpt_freq=ckpt_freq)
callbacks.append(checkpointer)
# Set up the custom TensorBoard callback. It will log the loss
# after every step, and some images and user-set summaries only on
# the first step of every epoch.
tensorboard = TensorBoard(log_dir=os.path.join(results_dir, 'tb_events'))
callbacks.append(tensorboard)
# Set up the CSV logger, logging statistics after every epoch.
csvfilename = os.path.join(results_dir, 'training.csv')
csvlogger = CSVLoggerWithStatus(
csvfilename,
separator=',',
append=True)
callbacks.append(csvlogger)
if is_wandb_initialized():
callbacks.append(WandbCallback())
return callbacks
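def _example_setup_callbacks(cfg, steps_per_epoch):
    """Hedged sketch of how a training loop might wire up setup_callbacks().

    The cfg field names used below (train.checkpoint_interval, train.lr_config,
    train.num_epochs) are assumptions for illustration; the authoritative schema
    lives in config/default_config.py.
    """
    return setup_callbacks(
        ckpt_freq=cfg.train.checkpoint_interval,
        results_dir=cfg.results_dir,
        lr_config=cfg.train.lr_config,
        init_epoch=1,
        iters_per_epoch=steps_per_epoch,
        max_epoch=cfg.train.num_epochs,
        key=cfg.encryption_key)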
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/model/callback_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main training script."""
import logging
import os
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow_quantization.custom_qdq_cases import EfficientNetQDQCase, ResNetV1QDQCase
from tensorflow_quantization.quantize import quantize_model
from nvidia_tao_tf2.blocks.module import TAOModule
from nvidia_tao_tf2.cv.classification.model.model_builder import get_model
from nvidia_tao_tf2.cv.classification.utils.helper import (
build_optimizer,
decode_tltb,
load_model,
setup_config)
logger = logging.getLogger(__name__)
class ClassifierModule(TAOModule):
"""TAO Classifer Module."""
def __init__(self, cfg, steps_per_epoch):
"""Init."""
self.cfg = cfg
self.steps_per_epoch = steps_per_epoch
self.pretrained_model = None
self.model = self._build_models(cfg)
self.initial_epoch, ckpt_path = self._get_latest_checkpoint(
cfg.results_dir, self.model.name)
self._load_pretrained_weights(ckpt_path, cfg)
self.configure_optimizers(cfg)
self.configure_losses(cfg)
self._quantize_models(cfg)
self.compile()
if hvd.rank() == 0:
self.model.summary()
def _quantize_models(self, cfg):
"""Quantize models."""
if cfg.train.qat and self.initial_epoch == 0:
logger.info("QAT enabled.")
qdq_cases = [EfficientNetQDQCase()] \
if 'efficientnet' in cfg.model.backbone else [ResNetV1QDQCase()]
self.model = quantize_model(self.model, custom_qdq_cases=qdq_cases)
def _build_models(self, cfg):
"""Build classification model."""
if cfg['data_format'] == 'channels_first':
input_shape = (cfg.model.input_channels, cfg.model.input_height, cfg.model.input_width)
else:
input_shape = (cfg.model.input_height, cfg.model.input_width, cfg.model.input_channels)
ka = {
'use_batch_norm': cfg['model']['use_batch_norm'],
'use_pooling': cfg['model']['use_pooling'],
'freeze_bn': cfg['model']['freeze_bn'],
'use_bias': cfg['model']['use_bias'],
'all_projections': cfg['model']['all_projections'],
'dropout': cfg['model']['dropout'],
'model_config_path': cfg['model']['byom_model'],
'passphrase': cfg['encryption_key'],
'activation_type': cfg['model']['activation_type'],
}
model = get_model(
backbone=cfg['model']['backbone'],
input_shape=input_shape,
data_format=cfg['data_format'],
nclasses=cfg['dataset']['num_classes'],
retain_head=cfg['model']['retain_head'],
freeze_blocks=cfg['model']['freeze_blocks'],
**ka)
# @scha: Load CUSTOM_OBJS from BYOM
if cfg['model']['backbone'] in ["byom"]:
custom_objs = decode_tltb(ka['model_config_path'], ka['passphrase'])['custom_objs']
else:
custom_objs = {}
# Set up BN and regularizer config
model = setup_config(
model,
cfg.train.reg_config,
bn_config=cfg.train.bn_config,
custom_objs=custom_objs
)
return model
def configure_losses(self, cfg, loss=None):
"""Configure losses."""
self.losses = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=cfg.train.label_smoothing)
def configure_optimizers(self, cfg):
"""Configure optimizers."""
if self.initial_epoch:
self.model = self.pretrained_model
opt = self.pretrained_model.optimizer
else:
# Defining optimizer
opt = build_optimizer(cfg.train.optim_config)
# Add Horovod Distributed Optimizer
self.opt = hvd.DistributedOptimizer(
opt, backward_passes_per_step=1, average_aggregated_gradients=True)
def compile(self):
"""Compile model."""
self.model.compile(
loss=self.losses,
metrics=[tf.keras.metrics.CategoricalAccuracy(name='accuracy')],
optimizer=self.opt,
experimental_run_tf_function=False)
def _load_pretrained_weights(self, ckpt_path, cfg):
"""Load pretrained weights."""
ckpt_path = ckpt_path or cfg.train.checkpoint
if ckpt_path:
if hvd.rank() == 0:
logger.info("Loading pretrained model from %s", ckpt_path)
# Decrypt and load pretrained model
self.pretrained_model = load_model(
ckpt_path,
enc_key=cfg.encryption_key)
strict_mode = True
for layer in self.pretrained_model.layers[1:]:
# The layer must match up to prediction layers.
if 'predictions' in layer.name:
strict_mode = False
try:
l_return = self.model.get_layer(layer.name)
except ValueError:
# Some layers are not there
continue
try:
l_return.set_weights(layer.get_weights())
except ValueError:
if strict_mode:
if cfg.train.qat and self.initial_epoch > 0:
# resume QAT
self.model = self.pretrained_model
else:
# This is a pruned model
self.model = setup_config(
self.pretrained_model,
cfg.train.reg_config,
bn_config=cfg.train.bn_config
)
def _get_latest_checkpoint(self, model_dir, model_name='efficientnet-b'):
"""Get the last tlt checkpoint."""
if not os.path.exists(model_dir):
return 0, None
last_checkpoint = ''
for f in os.listdir(model_dir):
if f.startswith(model_name) and f.endswith('.tlt'):
last_checkpoint = last_checkpoint if last_checkpoint > f else f
if not last_checkpoint:
return 0, None
initial_epoch = int(last_checkpoint[:-4].split('_')[-1])
if hvd.rank() == 0:
logger.info('Resume training from #%d epoch', initial_epoch)
return initial_epoch, os.path.join(model_dir, last_checkpoint)
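# Checkpoint discovery above relies on the naming convention used by EffCheckpoint,
# i.e. '<model name>_<3-digit epoch>.tlt'. As a hedged example, a results directory
# containing 'efficientnet-b0_005.tlt' and 'efficientnet-b0_012.tlt' resumes from
# epoch 12 with the latter file.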
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/model/classifier_module.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
from abc import ABC, abstractmethod
import numpy as np
from PIL import Image
from nvidia_tao_tf2.cv.classification.utils.preprocess_input import preprocess_input
class Inferencer(ABC):
"""Manages model inference."""
@abstractmethod
def __init__(self, model_path, input_shape=None, batch_size=None,
img_mean=None, keep_aspect_ratio=False, img_depth=8):
"""Init."""
pass
@abstractmethod
def infer_single(self, img_path):
"""Run inference on a single image."""
pass
def _load_img(self, img_path=None, img_alt=None):
"""load an image and returns the original image and a numpy array for model to consume.
Args:
img_path (str): path to an image
img_alt (np.array): only for testing (h, w, c)
Returns:
img (PIL.Image): PIL image of original image.
ratio (float): resize ratio of original image over processed image
inference_input (array): numpy array for processed image
"""
if img_path:
img = Image.open(img_path)
elif img_alt is not None:
img = Image.fromarray(img_alt)
else:
raise RuntimeError("image path is not defined.")
orig_w, orig_h = img.size
ratio = min(self._img_width / float(orig_w), self._img_height / float(orig_h))
# do not change aspect ratio
new_w = int(round(orig_w * ratio))
new_h = int(round(orig_h * ratio))
if self.keep_aspect_ratio:
im = img.resize((new_w, new_h), Image.ANTIALIAS)
else:
im = img.resize((self._img_width, self._img_height), Image.ANTIALIAS)
if im.mode in ('RGBA', 'LA') or \
(im.mode == 'P' and 'transparency' in im.info) and \
self.model_img_mode == 'L':
# Need to convert to RGBA if LA format due to a bug in PIL
im = im.convert('RGBA')
inf_img = Image.new("RGBA", (self._img_width, self._img_height))
inf_img.paste(im, (0, 0))
inf_img = inf_img.convert(self.model_img_mode)
else:
inf_img = Image.new(
self.model_img_mode,
(self._img_width, self._img_height)
)
inf_img.paste(im, (0, 0))
inf_img = np.array(inf_img).astype(np.float32)
if self.model_img_mode == 'L':
inf_img = np.expand_dims(inf_img, axis=2)
if not self.img_mean:
if self.img_depth == 8:
inference_input = inf_img - 117.3786
elif self.img_depth == 16:
inference_input = inf_img - 30048.9216
else:
raise ValueError(
f"Unsupported image depth: {self.img_depth}, should be 8 or 16, "
"please check `model.input_image_depth` in spec file")
else:
inference_input = inf_img - self.img_mean[0]
else:
inference_input = preprocess_input(inf_img,
data_format='channels_first',
img_mean=self.img_mean,
mode=self.preprocess_mode)
return img, float(orig_w) / new_w, inference_input
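class _ExampleInferencer(Inferencer):
    """Minimal sketch of the Inferencer contract; not used by the toolkit.

    Concrete subclasses (see keras_inferencer.py and trt_inferencer.py) must define
    the attributes that _load_img() reads: _img_width, _img_height, model_img_mode,
    keep_aspect_ratio, img_mean, img_depth and preprocess_mode. The 224x224 RGB
    values below are illustrative assumptions.
    """
    def __init__(self, model_path, input_shape=None, batch_size=None,
                 img_mean=None, keep_aspect_ratio=False, img_depth=8):
        """Record the attributes _load_img() expects."""
        self._img_width = 224
        self._img_height = 224
        self.model_img_mode = 'RGB'
        self.keep_aspect_ratio = keep_aspect_ratio
        self.img_mean = img_mean
        self.img_depth = img_depth
        self.preprocess_mode = 'torch'
    def infer_single(self, img_path):
        """Preprocess only; a real implementation would also run the model."""
        _, _, inference_input = self._load_img(img_path)
        return inference_input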
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/inferencer/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit inferencer for image classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for loading engine."""
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
class HostDeviceMem(object):
"""Simple helper data class that's a little nice to use than a 2-tuple."""
def __init__(self, host_mem, device_mem):
"""Init function."""
self.host = host_mem
self.device = device_mem
def __str__(self):
"""___str___."""
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
"""___repr___."""
return self.__str__()
def do_inference(context, bindings, inputs,
outputs, stream, batch_size=1,
execute_v2=False):
"""Generalization for multiple inputs/outputs.
inputs and outputs are expected to be lists of HostDeviceMem objects.
"""
# Transfer input data to the GPU.
for inp in inputs:
cuda.memcpy_htod_async(inp.device, inp.host, stream)
# Run inference.
if execute_v2:
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
else:
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
for out in outputs:
cuda.memcpy_dtoh_async(out.host, out.device, stream)
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def allocate_buffers(engine, context=None):
"""Allocates host and device buffer for TRT engine inference.
This function is similair to the one in common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins doesn't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
context (trt.IExecutionContext): Context for dynamic shape engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32,
"BatchedNMS": np.int32, "BatchedNMS_1": np.float32,
"BatchedNMS_2": np.float32, "BatchedNMS_3": np.float32,
"generate_detections": np.float32,
"mask_head/mask_fcn_logits/BiasAdd": np.float32,
"softmax_1": np.float32,
"input_1": np.float32}
for binding in engine:
if context:
binding_id = engine.get_binding_index(str(binding))
size = trt.volume(context.get_binding_shape(binding_id))
else:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
# avoid error when bind to a number (YOLO BatchedNMS)
size = engine.max_batch_size if size == 0 else size
if str(binding) in binding_to_type:
dtype = binding_to_type[str(binding)]
else:
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def load_engine(trt_runtime, engine_path):
"""Helper funtion to load an exported engine."""
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
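def _example_engine_inference(engine_path, batch):
    """Hedged sketch of the intended call order for the helpers above.

    `engine_path` and `batch` (a numpy array already shaped and flattened to match
    the engine input binding) are hypothetical; TRTInferencer in
    inferencer/trt_inferencer.py is the production wrapper around these helpers.
    """
    trt_runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
    engine = load_engine(trt_runtime, engine_path)
    context = engine.create_execution_context()
    inputs, outputs, bindings, stream = allocate_buffers(engine, context)
    # Copy the batch into the pagelocked host buffer before launching inference.
    np.copyto(inputs[0].host, batch.ravel())
    return do_inference(
        context, bindings=bindings, inputs=inputs, outputs=outputs,
        stream=stream, execute_v2=True)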
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/inferencer/engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
from nvidia_tao_tf2.cv.classification.inferencer.inferencer import Inferencer
from nvidia_tao_tf2.cv.classification.utils.helper import load_model
from nvidia_tao_tf2.cv.classification.utils.preprocess_crop import load_and_crop_img
from nvidia_tao_tf2.cv.classification.utils.preprocess_input import preprocess_input
class KerasInferencer(Inferencer):
"""Keras inferencer"""
def __init__(self, model_path,
img_mean=None,
keep_aspect_ratio=True,
key=None,
preprocess_mode='torch',
interpolation='bilinear',
img_depth=8,
data_format='channels_first'):
"""Init."""
self.model_path = model_path
self.img_mean = img_mean
self.keep_aspect_ratio = keep_aspect_ratio
self.key = key
self.preprocess_mode = preprocess_mode
self.interpolation = interpolation
self.img_depth = img_depth
self._data_format = data_format
self._load_model(model_path)
def _load_model(self, model_path) -> None:
self.model = load_model(model_path, self.key)
self.model.summary()
self._input_shape = tuple(self.model.layers[0].input_shape[0])
if self._data_format == "channels_first":
self._img_height, self._img_width = self._input_shape[2:4]
self._nchannels = self._input_shape[1]
else:
self._img_height, self._img_width = self._input_shape[1:3]
self._nchannels = self._input_shape[3]
self.model_img_mode = 'rgb' if self._nchannels == 3 else 'grayscale'
def _load_img(self, img_path):
image = load_and_crop_img(
img_path,
grayscale=False,
color_mode=self.model_img_mode,
target_size=(self._img_height, self._img_width),
interpolation=self.interpolation,
)
image = np.array(image).astype(np.float32)
if self.model_img_mode == 'grayscale' and image.ndim == 2:
image = np.expand_dims(image, axis=2)
return preprocess_input(
image,
mode=self.preprocess_mode, color_mode=self.model_img_mode,
img_mean=self.img_mean,
data_format='channels_last', img_depth=self.img_depth)
def infer_single(self, img_path):
"""Run inference on a single image with the tlt model."""
infer_input = self._load_img(img_path)
if self._data_format == "channels_first":
infer_input = infer_input.transpose(2, 0, 1)
infer_input = infer_input[None, ...]
# Keras inference
raw_predictions = self.model.predict(infer_input, batch_size=1)
return raw_predictions
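def _example_keras_inference():
    """Hedged usage sketch; the model path, key and image below are illustrative only."""
    inferencer = KerasInferencer(
        '/workspace/results/train/efficientnet-b0_080.tlt',
        key='nvidia_tlt',
        preprocess_mode='torch',
        interpolation='bilinear',
        data_format='channels_first')
    # Returns the raw softmax scores for a single image.
    return inferencer.infer_single('/workspace/data/test/cat/0001.jpg')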
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/inferencer/keras_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_tf2.cv.classification.inferencer.inferencer import Inferencer
from nvidia_tao_tf2.cv.classification.inferencer.engine import allocate_buffers, do_inference, load_engine
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
TRT_DYNAMIC_DIM = -1
class TRTInferencer(Inferencer):
"""Manages TensorRT objects for model inference."""
def __init__(self, model_path, input_shape=None, batch_size=None,
img_mean=None, keep_aspect_ratio=False,
                 data_format='channels_first', img_depth=8):
"""Initializes TensorRT objects needed for model inference.
Args:
model_path (str): path where TensorRT engine should be stored
input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
batch_size (int): batch size for dynamic shape engine
            img_depth (int): depth of images; only 8-bit and 16-bit are supported
            data_format (str): 'channels_first' or 'channels_last'
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
self.trt_engine = load_engine(self.trt_runtime, model_path)
self.max_batch_size = batch_size or self.trt_engine.max_batch_size
self.execute_v2 = True
# Execution context is needed for inference
self.context = self.trt_engine.create_execution_context()
def override_shape(shape, batch_size):
            return tuple(batch_size if dim == TRT_DYNAMIC_DIM else dim for dim in shape)
# Allocate memory for multiple usage [e.g. multiple batch inference]
# Resolve dynamic shapes in the context
for binding in self.trt_engine:
binding_idx = self.trt_engine.get_binding_index(binding)
shape = input_shape or self.trt_engine.get_binding_shape(binding_idx)
if self.trt_engine.binding_is_input(binding_idx):
assert binding_idx == 0, "More than 1 input is detected."
if TRT_DYNAMIC_DIM in shape:
shape = override_shape(shape, self.max_batch_size)
self.execute_v2 = True
self.context.set_binding_shape(binding_idx, shape)
self._input_shape = shape
if data_format == "channels_first":
self._img_height, self._img_width = self._input_shape[2:4]
self._nchannels = self._input_shape[1]
else:
self._img_height, self._img_width = self._input_shape[1:3]
self._nchannels = self._input_shape[3]
self.model_img_mode = 'RGB' if self._nchannels == 3 else 'L'
assert self._input_shape, "Input shape not detected."
assert self._nchannels in [1, 3], "Invalid input image dimension."
print(f"TensorRT engine input shape: {self._input_shape}")
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(
self.trt_engine,
self.context)
input_volume = trt.volume(self._input_shape)
self.numpy_array = np.zeros((self.max_batch_size, input_volume))
self.img_mean = img_mean
self.keep_aspect_ratio = keep_aspect_ratio
self.model_img_mode = 'RGB' if self._nchannels == 3 else 'L'
self.img_depth = img_depth
assert self.img_depth in [8, 16], "Only 8-bit and 16-bit images are supported"
def clear_buffers(self):
"""Simple function to free input, output buffers allocated earlier.
Args:
No explicit arguments. Inputs and outputs are member variables.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
# Loop through inputs and free inputs.
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
for out in self.outputs:
out.device.free()
def clear_trt_session(self):
"""Simple function to free destroy tensorrt handlers.
Args:
No explicit arguments. Destroys context, runtime and engine.
Returns:
No explicit returns.
Raises:
ValueError if buffers not found.
"""
if self.trt_runtime:
del self.trt_runtime
if self.context:
del self.context
if self.trt_engine:
del self.trt_engine
if self.stream:
del self.stream
def infer_single(self, img_path):
"""Infers model on batch of same sized images resized to fit the model.
Args:
img_path (str): path to a single image file
"""
# load image
_, _, infer_input = self._load_img(img_path)
infer_input = infer_input.transpose(2, 0, 1)
imgs = infer_input[None, ...]
# Verify if the supplied batch size is not too big
max_batch_size = self.max_batch_size
actual_batch_size = len(imgs)
if actual_batch_size > max_batch_size:
raise ValueError(
f"image_paths list bigger ({actual_batch_size}) than engine max batch size ({max_batch_size})")
self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
print(self.numpy_array.shape)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, self.numpy_array.ravel())
# ...fetch model outputs...
results = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size,
execute_v2=self.execute_v2)
# ...and return results up to the actual batch size.
return results
def __del__(self):
"""Clear things up on object deletion."""
self.clear_trt_session()
self.clear_buffers()
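def _example_trt_inference():
    """Hedged usage sketch; the engine path, shape and image below are assumptions."""
    inferencer = TRTInferencer(
        '/workspace/results/export/classifier.engine',
        input_shape=(1, 3, 224, 224),  # (batch, channel, height, width)
        batch_size=1,
        data_format='channels_first')
    # Returns the flattened output buffers for a single image.
    return inferencer.infer_single('/workspace/data/test/cat/0001.jpg')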
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/inferencer/trt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to store default experiment specs for classification tasks."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/experiment_specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing callbacks for classification."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/callback/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Callbacks: utilities called at certain points during model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from zipfile import ZipFile
from tensorflow.keras.callbacks import ModelCheckpoint
from eff.core import Archive, File
from eff.callbacks import BinaryContentCallback
class EffCheckpoint(ModelCheckpoint):
"""Save the encrypted model after every epoch.
Attributes:
passphrase: API key to encrypt the model.
        epochs_since_last_save: Number of epochs since the model was last saved.
save_best_only: Flag to save model with best accuracy.
best: saved instance of best model.
verbose: Enable verbose messages.
"""
def __init__(self, eff_dir, key,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
ckpt_freq=1,
options=None,
**kwargs):
"""Initialization with encryption key."""
super().__init__(
eff_dir,
monitor=monitor,
verbose=verbose,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
mode=mode,
save_freq=save_freq,
options=options)
self.passphrase = key
self.epochs_since_last_save = 0
self.eff_dir = eff_dir
self.ckpt_freq = ckpt_freq
def zipdir(self, src, zip_path) -> None:
"""Function creates zip archive from src in dst location. The name of archive is zip_name.
:param src: Path to directory to be archived.
:param dst: Path where archived dir will be stored.
:param zip_name: The name of the archive.
:return: None
"""
# destination directory
os.chdir(os.path.dirname(zip_path))
# zipfile handler
with ZipFile(zip_path, "w") as zf:
# writing content of src directory to the archive
for root, _, filenames in os.walk(src):
for filename in filenames:
zf.write(
os.path.join(root, filename),
arcname=os.path.join(root.replace(src, ""), filename))
    def _save_eff(self, epoch, metadata=None):
        """Save EFF Archive."""
        metadata = metadata or {}
        epoch += 1
os_handle, self.temp_zip_file = tempfile.mkstemp()
os.close(os_handle)
# create zipfile from saved_model directory
self.zipdir(self.filepath, self.temp_zip_file)
# create artifacts from zipfile
eff_filename = f'{self.model.name}_{epoch:03d}.tlt'
zip_art = File(
name=eff_filename,
description="Artifact from checkpoint",
filepath=self.temp_zip_file,
encryption=bool(self.passphrase),
content_callback=BinaryContentCallback,
)
eff_filepath = os.path.join(self.eff_dir, eff_filename)
Archive.save_artifact(
save_path=eff_filepath, artifact=zip_art, passphrase=self.passphrase, **metadata)
def _remove_tmp_files(self):
"""Remove temporary zip file and directory."""
shutil.rmtree(self.filepath)
os.remove(self.temp_zip_file)
def on_epoch_end(self, epoch, logs=None):
"""Override on_epoch_end."""
self.epochs_since_last_save += 1
# pylint: disable=protected-access
if self.save_freq == 'epoch' and self.epochs_since_last_save % self.ckpt_freq == 0:
self.filepath = tempfile.mkdtemp() # override filepath
self._save_model(epoch=epoch, batch=None, logs=logs)
self._save_eff(epoch=epoch)
self._remove_tmp_files()
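# Illustrative usage sketch (not part of the original module). It shows how the
# EffCheckpoint callback is typically wired into a Keras training loop; the toy
# model, the output directory and the encryption key below are placeholders and
# not values taken from this repository.
if __name__ == "__main__":
    import tensorflow as tf
    demo_model = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation="relu", input_shape=(16,)),
        tf.keras.layers.Dense(1),
    ])
    demo_model.compile(optimizer="adam", loss="mse")
    eff_dir = "/tmp/eff_checkpoints"  # placeholder output directory
    os.makedirs(eff_dir, exist_ok=True)
    checkpoint_cb = EffCheckpoint(
        eff_dir,
        key="nvidia_tlt",  # placeholder passphrase used for encryption
        verbose=1,
        ckpt_freq=1,       # write an encrypted .tlt archive every epoch
    )
    x = tf.random.normal((32, 16))
    y = tf.random.normal((32, 1))
    demo_model.fit(x, y, epochs=2, callbacks=[checkpoint_cb])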
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/callback/eff_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom CSV Logger."""
from datetime import timedelta
import time
from tensorflow.keras.callbacks import CSVLogger
import nvidia_tao_tf2.common.logging.logging as status_logging
class CSVLoggerWithStatus(CSVLogger):
"""Callback that streams epoch results to a CSV file and status logger.
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, *args, **kwargs) -> None:
"""Init."""
super().__init__(*args, **kwargs)
self.s_logger = status_logging.get_status_logger()
def on_epoch_begin(self, epoch, logs=None):
"""on_epoch_begin."""
self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
"""Add metrics to status logger on epoch end."""
epoch = epoch + 1
super().on_epoch_end(epoch, logs)
for key in self.keys:
self.s_logger.kpi[key] = float(logs[key])
time_per_epoch = time.time() - self._epoch_start_time
monitored_data = {
"epoch": epoch,
"time_per_epoch": str(timedelta(seconds=time_per_epoch)),
}
self.s_logger.write(
data=monitored_data,
status_level=status_logging.Status.RUNNING)
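# Illustrative usage sketch (not part of the original module): the callback is a
# drop-in replacement for keras.callbacks.CSVLogger, so it is constructed the
# same way; on top of the CSV file it mirrors each epoch's metrics and timing to
# the TAO status logger. The log file path below is a placeholder.
if __name__ == "__main__":
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")
    csv_cb = CSVLoggerWithStatus("/tmp/training_log.csv", separator=",", append=False)
    x = tf.random.normal((16, 4))
    y = tf.random.normal((16, 1))
    model.fit(x, y, epochs=2, callbacks=[csv_cb])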
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/callback/csv_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained SSD model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
import copy
import os
import shutil
import logging
import tempfile
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import tensorflow as tf
from tf2onnx import tf_loader, convert
from nvidia_tao_tf2.cv.classification.utils.helper import decode_eff
TRT_VERSION = trt.__version__
TRT_MAJOR = float(".".join(TRT_VERSION.split(".")[:2]))
logger = logging.getLogger(__name__)
class Exporter:
"""Define an exporter for classification models."""
def __init__(self,
config=None,
min_batch_size=1,
opt_batch_size=4,
max_batch_size=8,
**kwargs):
"""Initialize the classification exporter."""
self.config = config
self.backend = "onnx"
self.input_shape = None
self.max_batch_size = max_batch_size
self.min_batch_size = min_batch_size
self.opt_batch_size = opt_batch_size
self._saved_model = decode_eff(
str(self.config.export.checkpoint),
self.config.encryption_key)
_handle, self.tmp_onnx = tempfile.mkstemp(suffix='onnx')
os.close(_handle)
def _set_input_shape(self):
model = tf.keras.models.load_model(self._saved_model, custom_objects=None)
self.input_shape = tuple(model.layers[0].input_shape[0][1:4])
def export_onnx(self) -> None:
"""Convert Keras saved model into ONNX format.
Args:
root_dir: root directory containing the quantized Keras saved model. This is the same directory where the ONNX
file will be saved.
saved_model_dir: name of the quantized 'saved_model' directory.
onnx_filename: desired name to save the converted ONNX file.
"""
# 1. Let TensorRT optimize QDQ nodes instead of TF
from tf2onnx.optimizer import _optimizers # noqa pylint: disable=C0415
updated_optimizers = copy.deepcopy(_optimizers)
del updated_optimizers["q_dq_optimizer"]
del updated_optimizers["const_dequantize_optimizer"]
# 2. Extract graph definition from SavedModel
graph_def, inputs, outputs = tf_loader.from_saved_model(
model_path=self._saved_model,
input_names=None,
output_names=None,
tag="serve",
signatures=["serving_default"]
)
# 3. Convert tf2onnx and save onnx file
if str(self.config.export.onnx_file).endswith('.onnx'):
onnx_path = self.config.export.onnx_file
else:
raise ValueError("The exported file must use .onnx as the extension.")
model_proto, _ = convert._convert_common(
graph_def,
name=self._saved_model,
opset=13,
input_names=inputs,
output_names=outputs,
output_path=onnx_path,
optimizers=updated_optimizers
)
graph = gs.import_onnx(model_proto)
graph.inputs[0].name = "input_1"
graph.cleanup().toposort()
onnx_model = gs.export_onnx(graph)
onnx.save(onnx_model, onnx_path)
logger.info("ONNX conversion completed.")
def _del(self):
"""Remove temp files."""
shutil.rmtree(self._saved_model)
def export(self):
"""Export ONNX model."""
self._set_input_shape()
self.export_onnx()
logger.info("The ONNX model is saved at %s", self.config.export.onnx_file)
self._del()
def _build_profile(self, builder, network, profile_shapes, default_shape_value=1):
"""Build optimization profile for the builder and configure the min, opt, max shapes appropriately."""
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
        def override_shape(shape):
            # Return a concrete tuple (not a generator) so the shape can be
            # passed to TensorRT and iterated more than once.
            return tuple(1 if is_dimension_dynamic(dim) else dim for dim in shape)
profile = builder.create_optimization_profile()
for idx in range(network.num_inputs):
inp = network.get_input(idx)
def get_profile_shape(name):
                # Find the profile_shapes entry whose key is a substring of the
                # input name, e.g. profile_shapes={'input': [min, opt, max]} and name='input_1:0'.
profile_name = None
for k in profile_shapes.keys():
if k in name:
profile_name = k
if profile_name is None: # not any([k in name for k in profile_shapes.keys()]):
return None
shapes = profile_shapes[profile_name]
if not isinstance(shapes, list) or len(shapes) != 3:
logger.critical("Profile values must be a list containing exactly 3 shapes (tuples or Dims)")
return shapes
if inp.is_shape_tensor:
shapes = get_profile_shape(inp.name)
if not shapes:
rank = inp.shape[0]
shapes = [(default_shape_value,) * rank] * 3
print(
"Setting shape input to {:}. If this is incorrect, for shape input: {:}, please provide tuples for min, opt, and max shapes containing {:} elements".format( # noqa pylint: disable=C0209
shapes[0], inp.name, rank
)
)
min, opt, max = shapes # noqa pylint: disable=W0622
profile.set_shape_input(inp.name, min, opt, max)
print(
"Setting shape input: {:} values to min: {:}, opt: {:}, max: {:}".format( # noqa pylint: disable=C0209
inp.name, min, opt, max
)
)
elif -1 in inp.shape:
shapes = get_profile_shape(inp.name)
if not shapes:
shapes = [override_shape(inp.shape)] * 3
print(
"Overriding dynamic input shape {:} to {:}. If this is incorrect, for input tensor: {:}, please provide tuples for min, opt, and max shapes containing values: {:} with dynamic dimensions replaced,".format( # noqa pylint: disable=C0209
inp.shape, shapes[0], inp.name, inp.shape
)
)
min, opt, max = shapes
profile.set_shape(inp.name, min, opt, max)
print(
"Setting input: {:} shape to min: {:}, opt: {:}, max: {:}".format( # noqa pylint: disable=C0209
inp.name, min, opt, max
)
)
if not profile:
print(
"Profile is not valid, please provide profile data. Note: profile was: {:}".format( # noqa pylint: disable=C0209
profile_shapes
)
)
return profile
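# Illustrative usage sketch (not part of the original module): driving the
# exporter from a minimal OmegaConf config. The checkpoint path, encryption key
# and output file are placeholders; in TAO these values come from the experiment
# spec (cfg.export.checkpoint, cfg.encryption_key, cfg.export.onnx_file).
if __name__ == "__main__":
    from omegaconf import OmegaConf
    cfg = OmegaConf.create({
        "encryption_key": "nvidia_tlt",
        "export": {
            "checkpoint": "/path/to/classification_model.tlt",
            "onnx_file": "/path/to/classification_model.onnx",
        },
    })
    exporter = Exporter(config=cfg, min_batch_size=1, opt_batch_size=4, max_batch_size=8)
    # Decodes the EFF archive, converts the SavedModel to ONNX and removes the
    # temporary decrypted files.
    exporter.export()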
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/export/classification_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification Trainer."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/trainer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification Trainer."""
import logging
from nvidia_tao_tf2.blocks.trainer import Trainer
logger = logging.getLogger(__name__)
class ClassifierTrainer(Trainer):
"""Classifier Trainer."""
def __init__(self, num_epochs, callbacks=None, cfg=None):
"""Init."""
self.num_epochs = num_epochs
self.callbacks = callbacks
self.cfg = cfg
def fit(self,
module,
train_dataset,
eval_dataset,
verbose) -> None:
"""Run model.fit with custom steps."""
if module.initial_epoch < self.num_epochs:
module.model.fit(
train_dataset,
epochs=self.num_epochs,
steps_per_epoch=module.steps_per_epoch,
initial_epoch=module.initial_epoch,
callbacks=self.callbacks,
verbose=verbose,
workers=self.cfg['train']['n_workers'],
validation_data=eval_dataset,
validation_steps=len(eval_dataset),
validation_freq=1)
else:
logger.info("Training (%d epochs) has finished.", self.num_epochs)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/trainer/classifier_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf2.cv.core import box_list
from nvidia_tao_tf2.cv.core import shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder,
negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(
self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
    Assign each anchor the classification target of its matching
    groundtruth label, as provided by match. Anchors that are not matched
    to anything are given the target self._unmatched_cls_target.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
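# Illustrative usage sketch (not part of the original module): assigning targets
# to two anchors for a single groundtruth box. The tiny similarity calculator
# and matcher defined inline exist only to show the interfaces TargetAssigner
# expects; real pipelines would plug in the project's IoU similarity calculator
# and argmax matcher instead.
if __name__ == "__main__":
    from nvidia_tao_tf2.cv.core import faster_rcnn_box_coder
    from nvidia_tao_tf2.cv.core import matcher as matcher_lib
    tf.disable_v2_behavior()
    class _CenterDistanceSimilarity(object):
        """Toy similarity: negative squared distance between box centers."""
        def compare(self, boxlist1, boxlist2):
            yc1, xc1, _, _ = boxlist1.get_center_coordinates_and_sizes()
            yc2, xc2, _, _ = boxlist2.get_center_coordinates_and_sizes()
            dist = ((tf.expand_dims(yc1, 1) - tf.expand_dims(yc2, 0)) ** 2 +
                    (tf.expand_dims(xc1, 1) - tf.expand_dims(xc2, 0)) ** 2)
            return -dist
    class _ArgMaxMatcher(matcher_lib.Matcher):
        """Toy matcher: every anchor (column) matches its most similar row."""
        def _match(self, similarity_matrix, **params):
            return tf.cast(tf.argmax(similarity_matrix, axis=0), tf.int32)
    anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5],
                                            [0.5, 0.5, 1.0, 1.0]]))
    gt_boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]]))
    gt_labels = tf.constant([[0.0, 1.0]])  # one-hot label of the single groundtruth box
    assigner = TargetAssigner(_CenterDistanceSimilarity(), _ArgMaxMatcher(),
                              faster_rcnn_box_coder.FasterRcnnBoxCoder(),
                              unmatched_cls_target=tf.constant([0.0, 0.0]))
    cls_targets, cls_weights, reg_targets, reg_weights, _ = assigner.assign(
        anchors, gt_boxes, groundtruth_labels=gt_labels)
    with tf.Session() as sess:
        print(sess.run([cls_targets, reg_targets]))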
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/target_assigner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
import abc
import tensorflow.compat.v1 as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
      ValueError: if match_results does not have rank 1 or is not an
        int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
    if match_results.dtype != tf.int32:
      raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.matched_column_indices())[0]
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.shape(self.unmatched_column_indices())[0]
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.ignored_column_indices())[0]
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher(object):
"""Abstract base class for matcher.
"""
__metaclass__ = abc.ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params))
@abc.abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
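# Illustrative usage sketch (not part of the original module): a Match object
# built directly from a hand-written result vector, showing how matched,
# unmatched and ignored columns are queried and how gather_based_on_match fills
# in per-column values.
if __name__ == "__main__":
    tf.disable_v2_behavior()
    # Column 0 matches row 1, column 1 is unmatched, column 2 matches row 0,
    # column 3 is ignored.
    match = Match(tf.constant([1, -1, 0, -2], dtype=tf.int32))
    gathered = match.gather_based_on_match(
        input_tensor=tf.constant([[10.0], [20.0]]),  # one value per row
        unmatched_value=tf.constant([0.0]),
        ignored_value=tf.constant([-1.0]))
    with tf.Session() as sess:
        print(sess.run(match.matched_column_indices()))    # expected: [0 2]
        print(sess.run(match.unmatched_column_indices()))  # expected: [1]
        print(sess.run(gathered))  # expected: [[20.], [0.], [10.], [-1.]]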
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/matcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf2.cv.core import box_coder
from nvidia_tao_tf2.cv.core import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha = tf.maximum(EPSILON, ha)
wa = tf.maximum(EPSILON, wa)
h = tf.maximum(EPSILON, h)
w = tf.maximum(EPSILON, w)
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
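# Illustrative usage sketch (not part of the original module): encoding one
# groundtruth box against one anchor and decoding it back. The round trip should
# recover the original corners, and the intermediate codes follow the
# ty/tx/th/tw scheme described in the module docstring.
if __name__ == "__main__":
    tf.disable_v2_behavior()
    boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.7, 0.9]]))
    anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
    coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
    rel_codes = coder.encode(boxes, anchors)
    decoded = coder.decode(rel_codes, anchors)
    with tf.Session() as sess:
        print(sess.run(rel_codes))      # scaled [ty, tx, th, tw] relative to the anchor
        print(sess.run(decoded.get()))  # expected to be close to [[0.1, 0.1, 0.7, 0.9]]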
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/faster_rcnn_box_coder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    else:
      return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
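# Illustrative usage sketch (not part of the original module): for a tensor with
# a partially known shape, combined_static_and_dynamic_shape keeps the known
# dimensions as plain Python ints and only falls back to dynamic tf.shape slices
# for the unknown ones.
if __name__ == "__main__":
    tf.disable_v2_behavior()
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    shape = combined_static_and_dynamic_shape(images)
    # The unknown batch dimension is a scalar tensor, the rest are ints.
    print(shape)  # e.g. [<tf.Tensor ...>, 224, 224, 3]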
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/shape_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow.compat.v1 as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object):
"""Abstract base class for box coder."""
__metaclass__ = ABCMeta
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overridden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overridden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/box_coder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit core module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf2.cv.core import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoints_flip_permutation is not provided')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(image)
orig_height = tf.to_float(image_shape[0])
orig_width = tf.to_float(image_shape[1])
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) > max_dimension,
lambda: small_size, lambda: large_size)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize_images(
image, new_size[:-1], method=method, align_corners=align_corners)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(
new_image, 0, 0, max_dimension, max_dimension)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
result.append(new_masks)
result.append(new_size)
return result
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
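# Illustrative sketch (not part of the original module): converting normalized
# boxes to pixel coordinates with scale_boxes_to_pixel_coordinates(). The image
# size and box values are made-up examples.
def _scale_boxes_example():
  """Scale one normalized box on a 100x200 dummy image to pixel coordinates."""
  image = tf.zeros([100, 200, 3], dtype=tf.float32)
  # [ymin, xmin, ymax, xmax] in normalized coordinates.
  boxes = tf.constant([[0.1, 0.2, 0.5, 0.8]], dtype=tf.float32)
  # Expected pixel box (approximately): [10.0, 40.0, 50.0, 160.0].
  return scale_boxes_to_pixel_coordinates(image, boxes)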
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/preprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferable at graph construction time.
"""
return self.data['boxes'].get_shape().as_list()[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
  def has_field(self, field):
    """Return whether the box list contains the given field."""
    return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns specified field with object; if no field is specified,
it returns the box coordinates.
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
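# Illustrative sketch (not part of the original module): minimal BoxList usage.
# The box coordinates and the 'scores' field below are made-up example values.
def _box_list_example():
  """Build a BoxList, attach an extra field and read back box statistics."""
  boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                       [0.2, 0.3, 0.6, 0.9]], dtype=tf.float32)
  boxlist = BoxList(boxes)
  boxlist.add_field('scores', tf.constant([0.9, 0.4], dtype=tf.float32))
  ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()
  return ycenter, xcenter, height, width, boxlist.get_field('scores')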
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/box_list.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
class RegionSimilarityCalculator(metaclass=ABCMeta):
"""Abstract base class for region similarity calculator."""
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
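# Illustrative sketch (not part of the original module): pairwise IoU between two
# tiny box collections. The import path follows this repository's layout and the
# box values are made up; the expected matrix is approximately
# [[1.0, 0.25], [0.25, 0.0]].
def _iou_example():
  """Return the [2, 2] pairwise IoU matrix for two small BoxLists."""
  from nvidia_tao_tf2.cv.core import box_list  # local import keeps the sketch self-contained
  boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                         [0.0, 0.0, 0.5, 0.5]], dtype=tf.float32))
  boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0],
                                         [0.5, 0.5, 1.0, 1.0]], dtype=tf.float32))
  # IouSimilarity().compare(boxes1, boxes2) wraps the same computation.
  return iou(boxes1, boxes2)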
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/region_similarity_calculator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(parsed_tensors['image/encoded'],
2**63 - 1))
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
  def __init__(self, include_mask=False, regenerate_source_id=False,
               include_image=False):
    """Initialize the decoder and define the TF Example features to parse."""
    self._include_mask = include_mask
self._include_image = include_image
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
'image/filename': tf.FixedLenFeature((), tf.string, ''),
'image/source_id': tf.FixedLenFeature((), tf.string, ''),
'image/height': tf.FixedLenFeature((), tf.int64, -1),
'image/width': tf.FixedLenFeature((), tf.int64, -1),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/area': tf.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.VarLenFeature(tf.string),
})
if include_image:
self._keys_to_features.update(
{
'image/encoded':
tf.FixedLenFeature((), tf.string),
}
)
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
tf.greater(tf.shape(masks)[0], 0),
lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
if self._include_image:
image = self._decode_image(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors['image/height'], -1),
tf.equal(parsed_tensors['image/width'], -1))
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors['image/height'] = tf.where(decode_image_shape,
image_shape[0],
parsed_tensors['image/height'])
parsed_tensors['image/width'] = tf.where(decode_image_shape, image_shape[1],
parsed_tensors['image/width'])
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']),
0), lambda: parsed_tensors['image/source_id'],
lambda: _get_source_id_from_encoded_image(parsed_tensors))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_image:
decoded_tensors.update({'image': image})
else:
decoded_tensors.update({'filename': parsed_tensors['image/filename']})
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
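# Illustrative sketch (not part of the original module): mapping the decoder over
# a TFRecord dataset. The file pattern below is a made-up placeholder; any
# COCO-style TFRecords written for this decoder would work.
def _decode_dataset_example(file_pattern='/data/coco/train-*.tfrecord'):
  """Build a tf.data pipeline that yields decoded feature dictionaries."""
  decoder = TfExampleDecoder(include_mask=False, include_image=True)
  dataset = tf.data.TFRecordDataset(tf.io.gfile.glob(file_pattern))
  # Each element is a dict with keys such as 'image', 'groundtruth_boxes'
  # and 'groundtruth_classes' (see decode() above).
  return dataset.map(decoder.decode,
                     num_parallel_calls=tf.data.experimental.AUTOTUNE)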
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/tf_example_decoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
columns from matching to rows (generally resulting in a negative training
example) and an unmatched_threshold to ignore the match (generally resulting
in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from nvidia_tao_tf2.cv.core import matcher
from nvidia_tao_tf2.cv.core import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
                         'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
        raise ValueError('When negatives are in between matched and '
                         'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0] == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return x * (1 - indicator) + val * indicator
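# Illustrative sketch (not part of the original module): how the thresholds above
# partition columns. The similarity values are made up. With matched_threshold=0.7
# and unmatched_threshold=0.3, a column whose best similarity is >= 0.7 matches a
# row, a column below 0.3 becomes -1 (negative) and a column in between becomes -2
# (ignored), given the default negatives_lower_than_unmatched=True.
def _argmax_matcher_example():
  """Run the matcher on a tiny 2x3 similarity matrix; expected [0, -2, -1]."""
  similarity = tf.constant([[0.9, 0.5, 0.1],
                            [0.2, 0.4, 0.0]], dtype=tf.float32)
  arg_max_matcher = ArgMaxMatcher(matched_threshold=0.7, unmatched_threshold=0.3)
  return arg_max_matcher._match(similarity)  # noqa pylint: disable=protected-access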
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/core/argmax_matcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing losses implementation for EfficientDet."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/losses/__init__.py |
"""EfficientDet losses."""
import numpy as np
import tensorflow as tf
import math
from typing import Union, Text
from nvidia_tao_tf2.cv.efficientdet.model import anchors
FloatType = Union[tf.Tensor, float, np.float32, np.float64]
class FocalLoss(tf.keras.losses.Loss):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
"""
def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):
"""Initialize focal loss.
Args:
alpha: A float32 scalar multiplying alpha to the loss from positive
examples and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
**kwargs: other params.
"""
super().__init__(**kwargs)
self.alpha = alpha
self.gamma = gamma
self.label_smoothing = label_smoothing
@tf.autograph.experimental.do_not_convert
def call(self, y, y_pred): # noqa pylint: disable=W0237
"""Compute focal loss for y and y_pred.
Args:
y: A tuple of (normalizer, y_true), where y_true is the target class.
y_pred: A float32 tensor [batch, height_in, width_in, num_predictions].
Returns:
the focal loss.
"""
normalizer, y_true = y
alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)
# compute focal loss multipliers before label smoothing, such that it will
# not blow up the loss.
pred_prob = tf.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t)**gamma
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
# compute the final loss and return
return alpha_factor * modulating_factor * ce / normalizer
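# Illustrative sketch (not part of the original module): calling FocalLoss on tiny
# made-up logits. As in call() above, the first argument is the
# (normalizer, one-hot targets) tuple; reduction is disabled here to expose the
# per-element loss values.
def _focal_loss_example():
  """Evaluate FocalLoss on a [1, 2] logits tensor."""
  loss_fn = FocalLoss(alpha=0.25, gamma=1.5, label_smoothing=0.0,
                      reduction=tf.keras.losses.Reduction.NONE)
  y_true = tf.constant([[1.0, 0.0]])
  y_pred = tf.constant([[2.0, -1.0]])  # raw logits, not probabilities
  normalizer = tf.constant(1.0)
  return loss_fn((normalizer, y_true), y_pred)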
class StableFocalLoss(tf.keras.losses.Loss):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Below are comments/derivations for computing modulator.
  For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmoid(x)
  for positive samples and 1 - sigmoid(x) for negative examples.
The modulator, defined as (1 - P_t)^r, is a critical part in focal loss
computation. For r > 0, it puts more weights on hard examples, and less
weights on easier ones. However if it is directly computed as (1 - P_t)^r,
its back-propagation is not stable when r < 1. The implementation here
resolves the issue.
For positive samples (labels being 1),
(1 - p_t)^r
= (1 - sigmoid(x))^r
= (1 - (1 / (1 + exp(-x))))^r
= (exp(-x) / (1 + exp(-x)))^r
= exp(log((exp(-x) / (1 + exp(-x)))^r))
= exp(r * log(exp(-x)) - r * log(1 + exp(-x)))
= exp(- r * x - r * log(1 + exp(-x)))
For negative samples (labels being 0),
(1 - p_t)^r
= (sigmoid(x))^r
= (1 / (1 + exp(-x)))^r
= exp(log((1 / (1 + exp(-x)))^r))
= exp(-r * log(1 + exp(-x)))
Therefore one unified form for positive (z = 1) and negative (z = 0)
samples is: (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))).
"""
def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):
"""Initialize focal loss.
Args:
alpha: A float32 scalar multiplying alpha to the loss from positive
examples and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
**kwargs: other params.
"""
super().__init__(**kwargs)
self.alpha = alpha
self.gamma = gamma
self.label_smoothing = label_smoothing
@tf.autograph.experimental.do_not_convert
def call(self, y, y_pred): # noqa pylint: disable=W0237
"""Compute focal loss for y and y_pred.
Args:
y: A tuple of (normalizer, y_true), where y_true is the target class.
y_pred: A float32 tensor [batch, height_in, width_in, num_predictions].
Returns:
the focal loss.
"""
normalizer, y_true = y
alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)
positive_label_mask = tf.equal(y_true, 1.0)
negative_pred = -1.0 * y_pred
modulator = tf.exp(gamma * y_true * negative_pred - gamma * tf.math.log1p(tf.exp(negative_pred)))
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
loss = modulator * ce
weighted_loss = tf.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss)
weighted_loss /= normalizer
return weighted_loss
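# Illustrative sketch (not part of the original module): a numeric check of the
# modulator identity derived in the StableFocalLoss docstring, using made-up
# logits. Both forms of (1 - p_t)^gamma should agree to floating point precision.
def _stable_modulator_check(gamma=1.5):
  """Return the direct and the numerically stable modulator for comparison."""
  logits = tf.constant([[2.0, -1.0, 0.5]])
  labels = tf.constant([[1.0, 0.0, 1.0]])
  pred_prob = tf.sigmoid(logits)
  p_t = labels * pred_prob + (1 - labels) * (1 - pred_prob)
  direct = (1.0 - p_t) ** gamma
  stable = tf.exp(gamma * labels * (-logits) -
                  gamma * tf.math.log1p(tf.exp(-logits)))
  return direct, stable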
class BoxLoss(tf.keras.losses.Loss):
"""L2 box regression loss."""
def __init__(self, delta=0.1, **kwargs):
"""Initialize box loss.
Args:
delta: `float`, the point where the huber loss function changes from a
        quadratic to linear. It is typically around the mean value of the
        regression target. For instance, the regression targets of a 512x512
        input with 6 anchors on the P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
**kwargs: other params.
"""
super().__init__(**kwargs)
self.huber = tf.keras.losses.Huber(delta, reduction=tf.keras.losses.Reduction.NONE)
@tf.autograph.experimental.do_not_convert
def call(self, y_true, box_outputs): # noqa pylint: disable=W0237
"""Call."""
num_positives, box_targets = y_true
normalizer = num_positives * 4.0
mask = tf.cast(box_targets != 0.0, tf.float32)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self.huber(box_targets, box_outputs) * mask
box_loss = tf.reduce_sum(box_loss)
box_loss /= normalizer
return box_loss
class BoxIouLoss(tf.keras.losses.Loss):
"""Box iou loss."""
def __init__(self, iou_loss_type, min_level, max_level, num_scales,
aspect_ratios, anchor_scale, image_size, **kwargs):
"""Init BoxIOU Loss."""
super().__init__(**kwargs)
self.iou_loss_type = iou_loss_type
self.input_anchors = anchors.Anchors(min_level, max_level, num_scales,
aspect_ratios, anchor_scale,
image_size)
@tf.autograph.experimental.do_not_convert
def call(self, y_true, box_outputs): # noqa pylint: disable=W0237
"""Call."""
anchor_boxes = tf.tile(
self.input_anchors.boxes,
[box_outputs.shape[0] // self.input_anchors.boxes.shape[0], 1])
num_positives, box_targets = y_true
mask = tf.cast(box_targets != 0.0, tf.float32)
box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes) * mask
box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes) * mask
normalizer = num_positives * 4.0
box_iou_loss = iou_loss(box_outputs, box_targets, self.iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
def _get_v(b1_height: FloatType, b1_width: FloatType, b2_height: FloatType,
b2_width: FloatType) -> tf.Tensor:
"""Get the consistency measurement of aspect ratio for ciou."""
@tf.custom_gradient
def _get_grad_v(height, width):
"""backpropogate gradient."""
arctan = tf.atan(tf.math.divide_no_nan(b1_width, b1_height)) - tf.atan(
tf.math.divide_no_nan(width, height))
v = 4 * ((arctan / math.pi)**2)
def _grad_v(dv):
"""Grad for eager mode."""
gdw = dv * 8 * arctan * height / (math.pi**2)
gdh = -dv * 8 * arctan * width / (math.pi**2)
return [gdh, gdw]
def _grad_v_graph(dv, variables):
"""Grad for graph mode."""
gdw = dv * 8 * arctan * height / (math.pi**2)
gdh = -dv * 8 * arctan * width / (math.pi**2)
return [gdh, gdw], tf.gradients(v, variables, grad_ys=dv)
if tf.compat.v1.executing_eagerly_outside_functions():
return v, _grad_v
return v, _grad_v_graph
return _get_grad_v(b2_height, b2_width)
def _iou_per_anchor(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""Computing the IoU for a single anchor.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max].
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max].
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
# t_ denotes target boxes and p_ denotes predicted boxes.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
p_ymin, p_xmin, p_ymax, p_xmax = pred_boxes
zero = tf.convert_to_tensor(0.0, t_ymin.dtype)
p_width = tf.maximum(zero, p_xmax - p_xmin)
p_height = tf.maximum(zero, p_ymax - p_ymin)
t_width = tf.maximum(zero, t_xmax - t_xmin)
t_height = tf.maximum(zero, t_ymax - t_ymin)
p_area = p_width * p_height
t_area = t_width * t_height
intersect_ymin = tf.maximum(p_ymin, t_ymin)
intersect_xmin = tf.maximum(p_xmin, t_xmin)
intersect_ymax = tf.minimum(p_ymax, t_ymax)
intersect_xmax = tf.minimum(p_xmax, t_xmax)
intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin)
intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin)
intersect_area = intersect_width * intersect_height
union_area = p_area + t_area - intersect_area
iou_v = tf.math.divide_no_nan(intersect_area, union_area)
if iou_type == 'iou':
return iou_v # iou is the simplest form.
enclose_ymin = tf.minimum(p_ymin, t_ymin)
enclose_xmin = tf.minimum(p_xmin, t_xmin)
enclose_ymax = tf.maximum(p_ymax, t_ymax)
enclose_xmax = tf.maximum(p_xmax, t_xmax)
assert iou_type in ('giou', 'diou', 'ciou')
if iou_type == 'giou': # giou is the generalized iou.
enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)
enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)
enclose_area = enclose_width * enclose_height
giou_v = iou_v - tf.math.divide_no_nan(
(enclose_area - union_area), enclose_area)
return giou_v
assert iou_type in ('diou', 'ciou')
p_center = tf.stack([(p_ymin + p_ymax) / 2, (p_xmin + p_xmax) / 2], axis=-1)
t_center = tf.stack([(t_ymin + t_ymax) / 2, (t_xmin + t_xmax) / 2], axis=-1)
euclidean = tf.linalg.norm(t_center - p_center, axis=-1)
diag_length = tf.linalg.norm(
tf.stack(
[enclose_ymax - enclose_ymin, enclose_xmax - enclose_xmin],
axis=-1),
axis=-1)
diou_v = iou_v - tf.math.divide_no_nan(euclidean**2, diag_length**2)
if iou_type == 'diou': # diou is the distance iou.
return diou_v
assert iou_type == 'ciou'
v = _get_v(p_height, p_width, t_height, t_width)
alpha = tf.math.divide_no_nan(v, ((1 - iou_v) + v))
return diou_v - alpha * v # the last one is ciou.
def iou_loss(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""A unified interface for computing various IoU losses.
  Let B denote the predicted box and B_gt the target (ground-truth) box:
  IoU = |B & B_gt| / |B U B_gt|
GIoU = IoU - |C - B U B_gt| / C, where C is the smallest box covering B and
B_gt.
DIoU = IoU - E(B, B_gt)^2 / c^2, E is the Euclidean distance of the center
points of B and B_gt, and c is the diagonal length of the smallest box
covering the two boxes
CIoU = IoU - DIoU - a * v, where a is a positive trade-off parameter, and
v measures the consistency of aspect ratio:
v = (arctan(w_gt / h_gt) - arctan(w / h)) * 4 / pi^2
where (w_gt, h_gt) and (w, h) are the width and height of the target and
predicted box respectively.
The returned loss is computed as 1 - one of {IoU, GIoU, DIoU, CIoU}.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
if iou_type not in ('iou', 'ciou', 'diou', 'giou'):
raise ValueError(
f'Unknown loss_type {iou_type}, not iou/ciou/diou/giou')
pred_boxes = tf.convert_to_tensor(pred_boxes, tf.float32)
target_boxes = tf.cast(target_boxes, pred_boxes.dtype)
  # t_ denotes target boxes and p_ denotes predicted boxes: (y_min, x_min, y_max, x_max)
pred_boxes_list = tf.unstack(pred_boxes, None, axis=-1)
target_boxes_list = tf.unstack(target_boxes, None, axis=-1)
assert len(pred_boxes_list) == len(target_boxes_list)
assert len(pred_boxes_list) % 4 == 0
iou_loss_list = []
for i in range(0, len(pred_boxes_list), 4):
pred_boxes = pred_boxes_list[i:i + 4]
target_boxes = target_boxes_list[i:i + 4]
# Compute mask.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
mask = tf.math.logical_and(t_ymax > t_ymin, t_xmax > t_xmin)
mask = tf.cast(mask, t_ymin.dtype)
# Loss should be mask * (1 - iou) = mask - masked_iou.
pred_boxes = [b * mask for b in pred_boxes]
target_boxes = [b * mask for b in target_boxes]
iou_loss_list.append(
mask *
(1 - tf.squeeze(_iou_per_anchor(pred_boxes, target_boxes, iou_type))))
if len(iou_loss_list) == 1:
return iou_loss_list[0]
return tf.reduce_sum(tf.stack(iou_loss_list), 0)
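# Illustrative sketch (not part of the original module): evaluating iou_loss() on
# a single made-up box pair. Identical boxes give a loss of 0, and disjoint boxes
# give a loss of 1 (or above 1 for 'giou') per anchor.
def _iou_loss_example():
  """Compute the GIoU loss for one predicted/target box pair."""
  pred_boxes = tf.constant([[0.1, 0.1, 0.6, 0.6]])    # [y_min, x_min, y_max, x_max]
  target_boxes = tf.constant([[0.2, 0.2, 0.7, 0.7]])
  return iou_loss(pred_boxes, target_boxes, iou_type='giou')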
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/losses/losses.py |
"""EfficientDet augmentation module."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/augmentation/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment.
[1] Zoph, Barret, et al. Learning Data Augmentation Strategies for Object Detection.
Arxiv: https://arxiv.org/abs/1906.11172
"""
import inspect
import math
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as image_ops
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)],
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)],
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 10)],
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 1.0, 4), ('Equalize', 1.0, 10)],
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
[('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
[('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
# if factor == 0.0:
# return tf.convert_to_tensor(image1)
# if factor == 1.0:
# return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# # Interpolate
# if 1.0 > factor > 0.0:
# # Interpolation means we always stay within 0 and 255.
# return tf.cast(temp, tf.uint8)
# # Extrapolate:
# # We need to clip and then cast.
# return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
result = tf.cond(tf.math.logical_and(tf.greater(factor, 0.0), tf.less(factor, 1.0)),
lambda: tf.cast(temp, tf.uint8),
lambda: tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8))
return result
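# Illustrative sketch (not part of the original module): blending two constant
# uint8 images with blend(). The pixel values are made up; with factor=0.5 the
# result is the elementwise midpoint, here 100 everywhere.
def _blend_example():
  """Blend two constant uint8 images halfway."""
  image1 = tf.zeros([4, 4, 3], dtype=tf.uint8)
  image2 = tf.ones([4, 4, 3], dtype=tf.uint8) * 200
  return blend(image1, image2, factor=0.5)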
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is chosen
  uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
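# Illustrative sketch (not part of the original module): applying cutout() to a
# random uint8 image. The image size, pad_size and replace value are made up.
def _cutout_example():
  """Cut a (2*16 x 2*16) gray patch out of a dummy 224x224 image."""
  image = tf.cast(
      tf.random_uniform([224, 224, 3], 0, 256, dtype=tf.int32), tf.uint8)
  return cutout(image, pad_size=16, replace=128)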
def solarize(image, threshold=128):
"""Solarize."""
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
"""Solarize with addition."""
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
mean = tf.reduce_mean(tf.cast(degenerate, tf.float32))
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = image_ops.rotate(wrap(image), radians)
return unwrap(image, replace)
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
      The potential values for the new min corner of the bbox will be between
      [old_min - pixel_scaling * bbox_height/2,
       old_min + pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
# Obtains image height and width and create helper clip functions.
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(
mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]],
constant_values=1)
content_tensor = tf.pad(
content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]],
constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y + 1, min_x:max_x + 1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(
new_min_y, new_min_x, new_max_y, new_max_x, mask, bbox_content, image)
return image, new_bbox
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
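# Hedged illustration of the degenerate-bbox fix above (values illustrative):
# if min_y == max_y, `_check_bbox_area` opens a gap of size `delta` so the box
# keeps a nonzero height.
#   min_y, min_x, max_y, max_x = _check_bbox_area(1.0, 0.2, 1.0, 0.4)
#   # -> min_y == 0.95 and max_y == 1.0 with the default delta of 0.05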
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
The probability is reduced so that we do not distort the content of too many
bounding boxes that are close to each other. The value of 3.0 was a
hyperparameter chosen when designing the AutoAugment algorithm and was found
empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
return prob / 3.0
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where the bbox location in the image will
have `augmentation_func` applied to it.
"""
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(
augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(
mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
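# Usage sketch (hedged): apply an image-only op such as `solarize` (defined
# earlier in this module) to just the region covered by one normalized bbox.
# The bbox values below are illustrative.
#   bbox = tf.constant([0.1, 0.1, 0.5, 0.5])  # (min_y, min_x, max_y, max_x)
#   image = _apply_bbox_augmentation(image, bbox, solarize, 128)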
def _concat_bbox(bbox, bboxes):
"""Helper function that concats bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(
tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A tuple. First element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func, func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. The final bboxes are also
returned; they are unchanged if func_changes_bbox is false, and the new
altered ones are returned if it is true.
"""
# Will keep track of the new altered bboxes after aug_func is repeatedly
# applied. The -1 values are dummy values and this first Tensor will be
# removed upon appending the first real bbox.
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(
tf.equal(tf.shape(bboxes)[0], 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper( # noqa pylint: disable=C3001
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
# Condition function that ends the loop once we have gone over all bboxes.
# images_and_bboxes contains (_image, _new_bboxes).
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes) # noqa pylint: disable=C3001
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [ # noqa pylint: disable=C3001
_idx + 1,
wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(), (image.get_shape(), tf.TensorShape([None, 4]))])
# Either return the altered bboxes or the original ones depending on if
# we altered them in any way.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func, func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
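# Hedged illustration of the wrapper above: it is a no-op when there are no
# boxes; otherwise each bbox independently gets `aug_func` applied with
# probability `prob`. This mirrors how the *_only_bboxes helpers below use it.
#   image, bboxes = _apply_multi_bbox_augmentation_wrapper(
#       image, bboxes, 0.3, equalize, False)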
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Rotate the coordinates according to the rotation matrix, clockwise if
# radians is positive, else counterclockwise.
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
# Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox( # noqa pylint: disable=C3001
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
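# Usage sketch (hedged): rotate an image together with its normalized boxes.
#   image, bboxes = rotate_with_bboxes(image, bboxes, degrees=10.0,
#                                      replace=[128, 128, 128])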
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = image_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = image_ops.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
# Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pixels: An int. How many pixels to shift the image and bboxes
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox( # noqa pylint: disable=C3001
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
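# Usage sketch (hedged): shift the image and its boxes by 20 pixels along X;
# shift_horizontal=False would shift along Y instead. Values illustrative.
#   image, bboxes = translate_bbox(image, bboxes, pixels=20,
#                                  replace=[128, 128, 128],
#                                  shift_horizontal=True)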
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = image_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = image_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
# Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
level: Float. How much to shear the image. This value will be between
-0.3 to 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox( # noqa pylint: disable=C3001
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image
# to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
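# Hedged example: autocontrast rescales each channel so its minimum maps to 0
# and its maximum to 255; a constant-valued channel is left unchanged.
#   contrasted = autocontrast(image)  # image is a uint8 [H, W, 3] tensor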
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalizing by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where the last channel is 0 for a spatial position, the other three
channels at that position are filled with the `replace` value. Operations
like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
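# Usage sketch (hedged): wrap/unwrap is how the geometric ops above mark and
# then fill the pixels that fall outside the transformed image. `image_ops`
# is the same module used by translate_x/translate_y above.
#   padded = wrap(image)                            # adds an all-ones 4th channel
#   shifted = image_ops.translate(padded, [10, 0])  # exposed pixels get 0 alpha
#   image = unwrap(shifted, replace=[128, 128, 128])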
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
First a location is randomly chosen within the bbox as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(
image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
# Cutout mask will be of size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y + 1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x + 1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
# Check to see if there are boxes; if so, apply the bbox cutout.
image = tf.cond(tf.equal(
tf.shape(bboxes)[0], 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level / _MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
"""Level to arg."""
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level / _MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level / _MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level / _MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into the function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getfullargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in inspect.getfullargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getfullargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in inspect.getfullargspec(func)[0]:
func = bbox_wrapper(func)
return (func, prob, args)
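# Hedged example of the policy-parsing step above (values illustrative; for
# 'Rotate_BBox' the hparams fields are not actually consulted):
#   func, prob, args = _parse_policy_info(
#       'Rotate_BBox', 0.6, 4, [128, 128, 128],
#       hparams_config.Config(dict(translate_const=250)))
#   # func is rotate_with_bboxes; args holds the rotation angle derived from
#   # the level plus the replace value appended as the final argument.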
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
assert 'bboxes' == inspect.getfullargspec(func)[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in inspect.getfullargspec(func)[0]:
prob = 1.0
# Apply the function with probability `prob`.
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
augmented_image, augmented_bboxes = tf.cond(
should_apply_op,
lambda: func(image, bboxes, *args),
lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# Note that using tf.case instead of tf.conds would result in significantly
# larger graphs and would even break export for some larger policies.
for (i, policy) in enumerate(policies):
image, bboxes = tf.cond(
tf.equal(i, policy_to_select),
lambda selected_policy=policy: selected_policy(image, bboxes),
lambda: (image, bboxes))
return (image, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes, augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
bboxes: tf.Tensor of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
the `policies` passed into the function. Additionally, returns the bboxes
if a non-None value was passed in for them.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
# Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_images, augmented_bboxes = select_and_apply_random_policy(
tf_policies, image, bboxes)
# Return the augmented images together with the (possibly dummy) bboxes.
return (augmented_images, augmented_bboxes)
@tf.autograph.experimental.do_not_convert
def distort_image_with_autoaugment(image,
bboxes,
augmentation_name):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
available_policies = {'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError(f'Invalid augmentation_name: {augmentation_name}')
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = hparams_config.Config(dict( # noqa pylint: disable=R1735
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=250,
cutout_bbox_const=50,
translate_bbox_const=120))
return build_and_apply_nas_policy(policy, image, bboxes, augmentation_hparams)
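# Usage sketch (hedged): typical call from an input pipeline, with `image` a
# uint8 [H, W, 3] tensor and `bboxes` a float32 [N, 4] tensor of normalized
# (min_y, min_x, max_y, max_x) boxes.
#   image, bboxes = distort_image_with_autoaugment(image, bboxes, 'v0')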
def distort_image_with_autocolor(image, bboxes, num_layers, magnitude):
"""Applies random color augmentation to `image` and `bboxes`."""
replace_value = [128, 128, 128]
augmentation_hparams = hparams_config.Config(
dict( # noqa pylint: disable=R1735
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=100,
cutout_bbox_const=50,
translate_bbox_const=120))
available_ops = [
'Equalize',
'Color',
'Contrast',
'Brightness',
'Sharpness',
]
if bboxes is None:
bboxes = tf.constant(0.0)
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
with tf.name_scope(f'auto_color_{layer_num}'):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
rand_magnitude = tf.random.uniform(shape=[], maxval=magnitude, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, rand_magnitude, replace_value, augmentation_hparams)
image, bboxes = tf.cond(
tf.equal(i, op_to_select),
lambda fn=func, fn_args=args: fn(image, bboxes, *fn_args),
lambda: (image, bboxes))
return (image, bboxes)
def distort_image_with_autotranslate(image, bboxes, num_layers, magnitude):
"""Applies random XY translation to `image` and `bboxes`."""
replace_value = [128, 128, 128]
augmentation_hparams = hparams_config.Config(
dict( # noqa pylint: disable=R1735
cutout_max_pad_fraction=0.75,
cutout_bbox_replace_with_mean=False,
cutout_const=100,
translate_const=50,
cutout_bbox_const=75,
translate_bbox_const=120))
available_ops = ['TranslateX_BBox', 'TranslateY_BBox']
if bboxes is None:
bboxes = tf.constant(0.0)
for layer_num in range(num_layers):
op_to_select = tf.random_uniform(
[], maxval=len(available_ops), dtype=tf.int32)
with tf.name_scope(f'auto_translate_{layer_num}'):
for (i, op_name) in enumerate(available_ops):
prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
rand_magnitude = tf.random.uniform(shape=[], maxval=magnitude, dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, rand_magnitude, replace_value, augmentation_hparams)
image, bboxes = tf.cond(
tf.equal(i, op_to_select),
lambda fn=func, fn_args=args: fn(image, bboxes, *fn_args),
lambda: (image, bboxes))
return (image, bboxes)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/augmentation/autoaugment.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weighted fusion layer."""
import tensorflow as tf
class WeightedFusion(tf.keras.layers.Layer):
"""Weighted Fusion Layer (Fast Attention)."""
def __init__(self, inputs_offsets=None, **kwargs):
"""Init."""
super().__init__(**kwargs)
self.inputs_offsets = inputs_offsets
self.vars = []
def build(self, input_shape=None):
"""Build."""
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(self.add_weight(initializer='ones', name=name))
def call(self, inputs):
"""Call."""
dtype = inputs[0].dtype
edge_weights = []
for var in self.vars:
var = tf.nn.relu(tf.cast(var, dtype=dtype))
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
inputs = [
inputs[i] * edge_weights[i] / (weights_sum + 1e-4)
for i in range(len(inputs))
]
new_node = tf.add_n(inputs)
return new_node
def get_config(self):
"""Config."""
config = super().get_config()
config.update({
'inputs_offsets': self.inputs_offsets
})
return config
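# Usage sketch (hedged, shapes illustrative): fuse two BiFPN feature maps with
# learned non-negative weights.
#   fuse = WeightedFusion(inputs_offsets=[0, 1])
#   p_out = fuse([tf.ones([1, 32, 32, 64]), tf.zeros([1, 32, 32, 64])])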
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/layers/weighted_fusion_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet custom layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class ImageResizeLayer(keras.layers.Layer):
"""A Keras layer to wrap tf.image.resize_nearst_neighbor function."""
def __init__(self,
target_height=128,
target_width=128,
data_format='channels_last',
**kwargs):
"""Init function."""
self.height = target_height
self.width = target_width
self.data_format = data_format
super().__init__(**kwargs)
def call(self, inputs):
"""Resize."""
if self.data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 2, 3, 1])
resized = tf.cast(tf.compat.v1.image.resize_nearest_neighbor(
tf.cast(inputs, tf.float32), [self.height, self.width]), dtype=inputs.dtype)
if self.data_format == 'channels_first':
resized = tf.transpose(resized, [0, 3, 1, 2])
return resized
def get_config(self):
"""Keras layer get config."""
config = {
'target_height': self.height,
'target_width': self.width,
'data_format': self.data_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
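# Usage sketch (hedged, shapes illustrative): nearest-neighbor upsample a
# feature map to 64x64.
#   resize = ImageResizeLayer(target_height=64, target_width=64)
#   upsampled = resize(tf.zeros([1, 32, 32, 16]))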
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/layers/image_resize_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO TF2 EfficientDet layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/layers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to store default Efficientdet hyperparameters."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
from nvidia_tao_tf2.common.config.mlops import ClearMLConfig, WandBConfig
@dataclass
class LoaderConfig:
"""Dataloader config."""
shuffle_buffer: int = 10000
cycle_length: int = 32
block_length: int = 16
shuffle_file: bool = True
prefetch_size: int = 2
@dataclass
class LRConfig:
"""LR config."""
name: str = 'cosine' # soft_anneal
warmup_epoch: int = 5
warmup_init: float = 0.0001
learning_rate: float = 0.2
annealing_epoch: int = 10
@dataclass
class OptConfig:
"""Optimizer config."""
name: str = 'sgd'
momentum: float = 0.9
@dataclass
class TrainConfig:
"""Train config."""
init_epoch: int = 0
optimizer: OptConfig = OptConfig()
lr_schedule: LRConfig = LRConfig()
num_examples_per_epoch: int = 120000
batch_size: int = 8
num_epochs: int = 300
checkpoint: str = ""
random_seed: int = 42
l1_weight_decay: float = 0.0
l2_weight_decay: float = 0.00004
amp: bool = False
pruned_model_path: str = ''
moving_average_decay: float = 0.9999
clip_gradients_norm: float = 10.0
skip_checkpoint_variables: str = ''
checkpoint_interval: int = 10
image_preview: bool = True
qat: bool = False
label_smoothing: float = 0.0
box_loss_weight: float = 50.0
iou_loss_type: str = ""
iou_loss_weight: float = 1.0
wandb: WandBConfig = WandBConfig(
name="efficientdet",
tags=["efficientdet", "training", "tao-toolkit"]
)
clearml: ClearMLConfig = ClearMLConfig(
task="efficientdet_train",
tags=["efficientdet", "training", "tao-toolkit"]
)
results_dir: Optional[str] = None
@dataclass
class ModelConfig:
"""Model config."""
name: str = 'efficientdet-d0'
aspect_ratios: str = '[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]'
anchor_scale: int = 4
min_level: int = 3
max_level: int = 7
num_scales: int = 3
freeze_bn: bool = False
freeze_blocks: List[int] = field(default_factory=lambda: [])
input_width: int = 512
input_height: int = 512
@dataclass
class AugmentationConfig:
"""Augmentation config."""
rand_hflip: bool = True
random_crop_min_scale: float = 0.1
random_crop_max_scale: float = 2
auto_color_distortion: bool = False
auto_translate_xy: bool = False
@dataclass
class DataConfig:
"""Data config."""
train_tfrecords: List[str] = field(default_factory=lambda: [])
train_dirs: List[str] = field(default_factory=lambda: []) # TODO
val_tfrecords: List[str] = field(default_factory=lambda: [])
val_dirs: List[str] = field(default_factory=lambda: []) # TODO
val_json_file: str = ""
num_classes: int = 91
max_instances_per_image: int = 200
skip_crowd_during_training: bool = True
use_fake_data: bool = False
loader: LoaderConfig = LoaderConfig()
augmentation: AugmentationConfig = AugmentationConfig()
@dataclass
class EvalConfig:
"""Eval config."""
batch_size: int = 8
num_samples: int = 5000
max_detections_per_image: int = 100
label_map: str = ''
max_nms_inputs: int = 5000
checkpoint: str = ''
trt_engine: Optional[str] = None
start_eval_epoch: int = 1
sigma: float = 0.5
results_dir: Optional[str] = None
@dataclass
class ExportConfig:
"""Export config."""
batch_size: int = 8
dynamic_batch_size: bool = True
min_score_thresh: float = 0.01
checkpoint: str = MISSING
onnx_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = ""
cal_cache_file: str = ""
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp32"
max_workspace_size: int = 2 # in Gb
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
onnx_file: str = MISSING
trt_engine: Optional[str] = None
tensorrt: TrtConfig = TrtConfig()
@dataclass
class InferenceConfig:
"""Inference config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
image_dir: str = MISSING
results_dir: Optional[str] = None
dump_label: bool = False
batch_size: int = 1
min_score_thresh: float = 0.3
label_map: str = ''
max_boxes_to_draw: int = 100
@dataclass
class PruneConfig:
"""Pruning config."""
checkpoint: str = MISSING
normalizer: str = 'max'
results_dir: Optional[str] = None
equalization_criterion: str = 'union'
granularity: int = 8
threshold: float = MISSING
min_num_filters: int = 16
excluded_layers: List[str] = field(default_factory=lambda: [])
@dataclass
class DatasetConvertConfig:
"""Dataset Convert config."""
image_dir: str = MISSING
annotations_file: str = MISSING
results_dir: Optional[str] = None
tag: str = ''
num_shards: int = 256
include_masks: bool = False
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
dataset: DataConfig = DataConfig()
export: ExportConfig = ExportConfig()
inference: InferenceConfig = InferenceConfig()
prune: PruneConfig = PruneConfig()
dataset_convert: DatasetConvertConfig = DatasetConvertConfig()
gen_trt_engine: GenTrtEngineConfig = GenTrtEngineConfig()
encryption_key: Optional[str] = None
data_format: str = 'channels_last'
results_dir: str = MISSING
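# Illustrative usage sketch (not part of the schema itself): these dataclasses are
# normally materialized through OmegaConf, as the tests in this repo do, e.g.
#   from omegaconf import OmegaConf
#   schema = OmegaConf.structured(ExperimentConfig)
#   cfg = OmegaConf.merge(schema, OmegaConf.load("experiment_specs/default.yaml"))
# The YAML path above is only an example; any spec matching this schema works.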
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the pruner for EfficientDet."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/pruner/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO EfficientDet model pruner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from nvidia_tao_tf2.blocks.pruner import Pruner
from nvidia_tao_tf2.cv.efficientdet.utils.helper import load_model
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
logger = logging.getLogger(__name__)
class EfficientDetPruner(Pruner):
"""EfficientDet Pruner."""
def _load_model(self):
"""Load model."""
self.model = load_model(self.model_path, self.cfg)
self.excluded_layers = self.model.output_names
self.excluded_layers.extend(
['box-0', 'box-1', 'box-2', 'class-0', 'class-1', 'class-2'])
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/pruner/pruner.py |
"""Postprocessing for anchor-based detection."""
import logging
from typing import List, Tuple
import functools
import tensorflow as tf
from nvidia_tao_tf2.blocks.processor.postprocessor import Postprocessor
from nvidia_tao_tf2.cv.efficientdet.utils import nms_utils
from nvidia_tao_tf2.cv.efficientdet.model import anchors
T = tf.Tensor # a shortcut for typing check.
CLASS_OFFSET = 1
def to_list(inputs):
"""Convert to list."""
if isinstance(inputs, dict):
return [inputs[k] for k in sorted(inputs.keys())]
if isinstance(inputs, list):
return inputs
raise ValueError(f'Unrecognized inputs : {inputs}')
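# For example (sketch): to_list({3: p3, 4: p4, 5: p5}) returns [p3, p4, p5] ordered by
# key, a list input is passed through unchanged, and any other type raises ValueError.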
class EfficientDetPostprocessor(Postprocessor):
"""EfficientDet Postprocessor."""
def __init__(self, params):
"""Init."""
self.params = params
def generate_detections(self,
cls_outputs,
box_outputs,
image_scales,
image_ids,
flip=False,
use_pyfunc=True):
"""A legacy interface for generating [id, x, y, w, h, score, class]."""
_, width = self.params.image_size
original_image_widths = tf.expand_dims(image_scales, -1) * width
if use_pyfunc:
detections_bs = []
boxes, scores, classes = self.pre_nms(cls_outputs, box_outputs)
for index in range(boxes.shape[0]):
nms_configs = {
'method': 'gaussian',
'iou_thresh': None, # use the default value based on method.
'score_thresh': 0.,
'sigma': self.params.eval_sigma,
'max_nms_inputs': self.params.max_nms_inputs,
'max_output_size': self.params.max_output_size,
}
detections = tf.numpy_function(
functools.partial(nms_utils.per_class_nms, nms_configs=nms_configs), [
boxes[index],
scores[index],
classes[index],
tf.slice(image_ids, [index], [1]),
tf.slice(image_scales, [index], [1]),
self.params.num_classes,
nms_configs['max_output_size'],
], tf.float32)
detections_bs.append(detections)
return tf.stack(detections_bs, axis=0, name='detections')
nms_boxes_bs, nms_scores_bs, nms_classes_bs, _ = self.postprocess_per_class(
cls_outputs, box_outputs, image_scales)
image_ids_bs = tf.cast(tf.expand_dims(image_ids, -1), nms_scores_bs.dtype)
if flip:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
# the mirrored location of the left edge is the image width
# minus the position of the right edge
original_image_widths - nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 0],
# the mirrored location of the right edge is the image width
# minus the position of the left edge
original_image_widths - nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
else:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 0],
nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
return tf.stack(detections_bs, axis=-1, name='detections')
def postprocess_per_class(self, cls_outputs, box_outputs, image_scales=None):
"""Post processing with per class NMS.
An accurate but relatively slow version of NMS. The idea is to perform NMS for
each class, and then combine them.
Args:
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
                x_min, y_max, x_max].
            image_scales: scaling factor for the final image and bounding boxes.
Returns:
A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = self.pre_nms(cls_outputs, box_outputs)
return self.per_class_nms(boxes, scores, classes, image_scales)
def per_class_nms(self, boxes, scores, classes, image_scales=None):
"""Per-class nms, a utility for postprocess_per_class.
Args:
boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
Box format is [y_min, x_min, y_max, x_max].
scores: A tensor with shape [N, K].
classes: A tensor with shape [N, K].
            image_scales: scaling factor for the final image and bounding boxes.
Returns:
A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
def single_batch_fn(element):
"""A mapping function for a single batch."""
boxes_i, scores_i, classes_i = element[0], element[1], element[2]
nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
nms_valid_len_cls = []
for cid in range(self.params.num_classes):
indices = tf.where(tf.equal(classes_i, cid))
if indices.shape[0] == 0:
continue
classes_cls = tf.gather_nd(classes_i, indices)
boxes_cls = tf.gather_nd(boxes_i, indices)
scores_cls = tf.gather_nd(scores_i, indices)
nms_boxes, nms_scores, nms_classes, nms_valid_len = self.nms(
boxes_cls, scores_cls, classes_cls, False)
nms_boxes_cls.append(nms_boxes)
nms_scores_cls.append(nms_scores)
nms_classes_cls.append(nms_classes)
nms_valid_len_cls.append(nms_valid_len)
# Pad zeros and select topk.
max_output_size = self.params.max_output_size or 100
nms_boxes_cls = tf.pad(
tf.concat(nms_boxes_cls, 0), [[0, max_output_size], [0, 0]])
nms_scores_cls = tf.pad(
tf.concat(nms_scores_cls, 0), [[0, max_output_size]])
nms_classes_cls = tf.pad(
tf.concat(nms_classes_cls, 0), [[0, max_output_size]])
nms_valid_len_cls = tf.stack(nms_valid_len_cls)
_, indices = tf.math.top_k(nms_scores_cls, k=max_output_size, sorted=True)
return tuple((
tf.gather(nms_boxes_cls, indices),
tf.gather(nms_scores_cls, indices),
tf.gather(nms_classes_cls, indices),
tf.minimum(max_output_size, tf.reduce_sum(nms_valid_len_cls))))
nms_boxes, nms_scores, nms_classes, nms_valid_len = self.batch_map_fn(
single_batch_fn, [boxes, scores, classes])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def nms(self, boxes: T, scores: T, classes: T,
padded: bool) -> Tuple[T, T, T, T]:
"""Non-maximum suppression.
Args:
boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
format is [y_min, x_min, y_max, x_max].
scores: a tensor with shape [N].
classes: a tensor with shape [N].
            padded: a bool value indicating whether the results are padded.
Returns:
A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
denoting the valid length of boxes/scores/classes outputs.
"""
method = 'gaussian'
max_output_size = 100
sigma = self.params.eval_sigma
if method == 'hard' or not method:
# hard nms.
sigma = 0.0
iou_thresh = 0.5
score_thresh = float('-inf')
elif method == 'gaussian':
iou_thresh = 1.0
score_thresh = 0.001
else:
raise ValueError(f'Inference has invalid nms method {method}')
        # The TF API's sigma is twice the paper's value, so here we divide it by 2:
# https://github.com/tensorflow/tensorflow/issues/40253.
nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=(sigma / 2),
pad_to_max_output_size=padded)
nms_boxes = tf.gather(boxes, nms_top_idx)
nms_classes = tf.cast(
tf.gather(classes, nms_top_idx) + CLASS_OFFSET, tf.float32)
return nms_boxes, nms_scores, nms_classes, nms_valid_lens
def pre_nms(self, cls_outputs, box_outputs, topk=True):
"""Detection post processing before nms.
        It takes the multi-level class and box predictions from the network, merges them
        into unified tensors, and computes boxes, scores, and classes.
Args:
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
            box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors].
topk: if True, select topk before nms (mainly to speed up nms).
Returns:
A tuple of (boxes, scores, classes).
"""
# get boxes by apply bounding box regression to anchors.
eval_anchors = anchors.Anchors(self.params['min_level'],
self.params['max_level'],
self.params['num_scales'],
self.params['aspect_ratios'],
self.params['anchor_scale'],
self.params['image_size'])
cls_outputs, box_outputs = self.merge_class_box_level_outputs(
cls_outputs, box_outputs)
if topk:
# select topK purely based on scores before NMS, in order to speed up nms.
cls_outputs, box_outputs, classes, indices = self.topk_class_boxes(
cls_outputs, box_outputs)
anchor_boxes = tf.gather(eval_anchors.boxes, indices)
else:
anchor_boxes = eval_anchors.boxes
classes = None
boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
# convert logits to scores.
scores = tf.math.sigmoid(cls_outputs)
return boxes, scores, classes
def batch_map_fn(self, map_fn, inputs, *args):
"""Apply map_fn at batch dimension."""
if isinstance(inputs[0], (list, tuple)):
batch_size = len(inputs[0])
else:
batch_size = inputs[0].shape.as_list()[0]
if not batch_size:
# handle dynamic batch size: tf.vectorized_map is faster than tf.map_fn.
return tf.vectorized_map(map_fn, inputs, *args)
outputs = []
for i in range(batch_size):
outputs.append(map_fn([x[i] for x in inputs]))
return [tf.stack(y) for y in zip(*outputs)]
def merge_class_box_level_outputs(self, cls_outputs: List[T],
box_outputs: List[T]) -> Tuple[T, T]:
"""Concatenates class and box of all levels into one tensor."""
cls_outputs_all, box_outputs_all = [], []
batch_size = tf.shape(cls_outputs[0])[0]
for level in range(0, self.params.max_level - self.params.min_level + 1):
if self.params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, self.params.num_classes]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(self, cls_outputs: T,
box_outputs: T) -> Tuple[T, T, T, T]:
"""Pick the topk class and box outputs."""
batch_size = tf.shape(cls_outputs)[0]
num_classes = self.params['num_classes']
max_nms_inputs = self.params.max_nms_inputs or 100
if max_nms_inputs > 0:
# Prune anchors and detections to only keep max_nms_inputs.
            # Due to some issues, top_k is currently slow in graph mode.
logging.info('use max_nms_inputs for pre-nms topk.')
cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_reshape, k=max_nms_inputs, sorted=False)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
box_outputs_topk = tf.gather_nd(
box_outputs, tf.expand_dims(indices, 2), batch_dims=1)
else:
logging.info('use max_reduce for pre-nms topk.')
            # Keep all anchors, but for each anchor, just keep the max probability for
# each class.
cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
num_anchors = tf.shape(cls_outputs)[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
box_outputs_topk = box_outputs
return cls_outputs_topk, box_outputs_topk, classes, indices
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/processor/postprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the processors for EfficientDet."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/processor/__init__.py |
"""EfficientDet Preprocessing."""
from nvidia_tao_tf2.blocks.processor.preprocessor import Preprocessor
class EfficientDetPreprocessor(Preprocessor):
"""EfficientDet Preprocessor."""
def __init__(self, images, output_size):
"""Init."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/processor/preprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet QAT pipeline tests."""
from datetime import datetime
import omegaconf
import pytest
import os
import shutil
import horovod.tensorflow.keras as hvd
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.scripts.train import run_experiment as run_train
from nvidia_tao_tf2.cv.efficientdet.scripts.evaluate import run_experiment as run_evaluate
from nvidia_tao_tf2.cv.efficientdet.scripts.export import run_export
TMP_MODEL_DIR = '/home/scratch.metropolis2/tao_ci/tao_tf2/models/tmp'
DATA_DIR = '/home/scratch.metropolis2/tao_ci/tao_tf2/data/coco'
time_str = datetime.now().strftime("%y_%m_%d_%H:%M:%S")
hvd.init()
@pytest.fixture(scope='function')
def cfg():
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
spec_file = os.path.join(parent_dir, 'experiment_specs', 'default.yaml')
default_cfg = omegaconf.OmegaConf.load(spec_file)
default_cfg.dataset.train_tfrecords = [DATA_DIR + '/val-*']
default_cfg.dataset.val_tfrecords = [DATA_DIR + '/val-*']
default_cfg.dataset.val_json_file = os.path.join(DATA_DIR, "annotations/instances_val2017.json")
default_cfg.train.num_examples_per_epoch = 128
default_cfg.train.checkpoint = ''
default_cfg.train.checkpoint_interval = 1
default_cfg.evaluate.num_samples = 10
return default_cfg
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs",
[(False, True, 4, 1)])
def test_train(amp, qat, batch_size, num_epochs, cfg):
# reset graph precision
policy = tf.keras.mixed_precision.Policy('float32')
tf.keras.mixed_precision.set_global_policy(policy)
results_dir = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}")
if os.path.exists(results_dir):
shutil.rmtree(results_dir)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.train.batch_size = batch_size
cfg.results_dir = results_dir
run_train(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs",
[(False, True, 4, 1)])
def test_eval(amp, qat, batch_size, num_epochs, cfg):
# reset graph precision
policy = tf.keras.mixed_precision.Policy('float32')
tf.keras.mixed_precision.set_global_policy(policy)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.evaluate.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
cfg.evaluate.batch_size = batch_size
run_evaluate(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs, max_bs, dynamic_bs, data_type",
[(False, True, 4, 1, 1, True, 'int8')])
def test_export(amp, qat, batch_size, num_epochs, max_bs, dynamic_bs, data_type, cfg):
# reset graph precision
policy = tf.keras.mixed_precision.Policy('float32')
tf.keras.mixed_precision.set_global_policy(policy)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.export.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
cfg.export.batch_size = max_bs
cfg.export.dynamic_batch_size = dynamic_bs
cfg.export.onnx_file = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.onnx')
run_export(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/tests/test_pipeline_qat.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet nonQAT pipeline tests."""
from datetime import datetime
import omegaconf
import pytest
import os
import shutil
import horovod.tensorflow.keras as hvd
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.scripts.train import run_experiment as run_train
from nvidia_tao_tf2.cv.efficientdet.scripts.evaluate import run_experiment as run_evaluate
from nvidia_tao_tf2.cv.efficientdet.scripts.inference import infer_tlt
from nvidia_tao_tf2.cv.efficientdet.scripts.export import run_export
from nvidia_tao_tf2.cv.efficientdet.scripts.prune import run_pruning
TMP_MODEL_DIR = '/home/scratch.metropolis2/tao_ci/tao_tf2/models/tmp'
DATA_DIR = '/home/scratch.metropolis2/tao_ci/tao_tf2/data/coco'
time_str = datetime.now().strftime("%y_%m_%d_%H:%M:%S")
hvd.init()
@pytest.fixture(scope='function')
def cfg():
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
spec_file = os.path.join(parent_dir, 'experiment_specs', 'default.yaml')
default_cfg = omegaconf.OmegaConf.load(spec_file)
default_cfg.dataset.train_tfrecords = [DATA_DIR + '/val-*']
default_cfg.dataset.val_tfrecords = [DATA_DIR + '/val-*']
default_cfg.dataset.val_json_file = os.path.join(DATA_DIR, "annotations/instances_val2017.json")
default_cfg.train.num_examples_per_epoch = 128
default_cfg.train.checkpoint = ''
default_cfg.train.checkpoint_interval = 1
default_cfg.evaluate.num_samples = 10
return default_cfg
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs",
[(True, False, 2, 2)])
def test_train(amp, qat, batch_size, num_epochs, cfg):
results_dir = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}")
if os.path.exists(results_dir):
shutil.rmtree(results_dir)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.train.batch_size = batch_size
cfg.results_dir = results_dir
run_train(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs",
[(True, False, 2, 2)])
def test_eval(amp, qat, batch_size, num_epochs, cfg):
# reset graph precision
policy = tf.keras.mixed_precision.Policy('float32')
tf.keras.mixed_precision.set_global_policy(policy)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.evaluate.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
cfg.evaluate.batch_size = batch_size
run_evaluate(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs, max_bs, dynamic_bs, data_type",
[(True, False, 2, 2, 1, True, 'int8')])
def test_export(amp, qat, batch_size, num_epochs, max_bs, dynamic_bs, data_type, cfg):
# reset graph precision
policy = tf.keras.mixed_precision.Policy('float32')
tf.keras.mixed_precision.set_global_policy(policy)
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.export.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
cfg.export.batch_size = max_bs
cfg.export.dynamic_batch_size = dynamic_bs
cfg.export.onnx_file = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.onnx')
run_export(cfg)
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs",
[(True, False, 2, 2)])
def test_infer(amp, qat, batch_size, num_epochs, cfg):
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.inference.image_dir = os.path.join(DATA_DIR, "raw-data", "debug2017")
cfg.inference.output_dir = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
'infer_output')
cfg.inference.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
infer_tlt(cfg)
@pytest.mark.parametrize("amp, qat, batch_size, num_epochs, threshold",
[(True, False, 2, 2, 0.5),
(True, False, 2, 2, 0.7),
(True, False, 2, 2, 0.9),
(True, False, 2, 2, 2.5),])
def test_prune(amp, qat, batch_size, num_epochs, threshold, cfg):
cfg.prune.threshold = threshold
cfg.train.num_epochs = num_epochs
cfg.train.amp = amp
cfg.train.qat = qat
cfg.prune.checkpoint = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}",
f'efficientdet-d0_00{num_epochs}.tlt')
cfg.prune.results_dir = os.path.join(
TMP_MODEL_DIR,
f"effdet_b{batch_size}_ep{num_epochs}_{time_str}")
run_pruning(cfg)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/tests/test_pipeline.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""EfficientDet model tests."""
import omegaconf
import pytest
import os
import numpy as np
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.model.efficientdet import efficientdet
from nvidia_tao_tf2.cv.efficientdet.model.model_builder import build_backbone
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.utils.model_utils import num_params
@pytest.fixture(scope='function')
def cfg():
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
spec_file = os.path.join(parent_dir, 'experiment_specs', 'default.yaml')
default_cfg = omegaconf.OmegaConf.load(spec_file)
default_cfg.data_format = 'channels_last'
default_cfg.train.num_examples_per_epoch = 128
default_cfg.train.checkpoint = ''
default_cfg.train.checkpoint_interval = 1
default_cfg.evaluate.num_samples = 10
return default_cfg
@pytest.mark.parametrize(
"model_name, input_height, input_width, expected",
[('efficientdet-d0', 512, 512, 3880652),
('efficientdet-d0', 511, 513, 3880652),
('efficientdet-d1', 128, 512, 6626699),
('efficientdet-d2', 512, 512, 8098056),
('efficientdet-d3', 256, 512, 12033745),
('efficientdet-d4', 512, 256, 20725700),
('efficientdet-d5', 512, 128, 33655916),])
def test_arch(model_name, input_height, input_width, expected, cfg):
cfg.model.input_height = input_height
cfg.model.input_width = input_width
cfg.model.name = model_name
config = hparams_config.get_efficientdet_config(model_name)
config.update(generate_params_from_cfg(config, cfg, mode='train'))
input_shape = list(config.image_size) + [3]
model = efficientdet(input_shape, training=True, config=config)
assert num_params(model) == expected
tf.compat.v1.reset_default_graph()
@pytest.mark.parametrize(
"model_name, input_height, input_width",
[('efficientdet-d0', 512, 512,),
('efficientdet-d0', 512, 128,),
('efficientdet-d1', 128, 512,),
('efficientdet-d2', 512, 512,),
('efficientdet-d3', 256, 512,),
('efficientdet-d4', 512, 256,),
('efficientdet-d5', 512, 128,),])
def test_backbone(model_name, input_height, input_width, cfg):
cfg.model.input_height = input_height
cfg.model.input_width = input_width
cfg.model.name = model_name
config = hparams_config.get_efficientdet_config(model_name)
config.update(generate_params_from_cfg(config, cfg, mode='train'))
input_shape = list(config.image_size) + [3]
inputs = tf.keras.Input(shape=input_shape)
features = build_backbone(inputs, config)
np.testing.assert_array_equal(list(features.keys()), [0, 1, 2, 3, 4, 5])
np.testing.assert_array_equal(features[0].shape[1:3], list(config.image_size))
np.testing.assert_array_equal(features[5].shape[1:3], list(map(lambda x: x // 32, config.image_size)))
tf.compat.v1.reset_default_graph()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/tests/L0/test_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet utils tests."""
from functools import partial
import math
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.model.activation_builder import activation_fn
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def softplus(x):
return math.log(math.exp(x) + 1)
def expected_result(value, type):
if type in ['silu', 'swish']:
return sigmoid(value) * value
elif type == 'hswish':
return value * min(max(0, value + 3), 6) / 6
elif type == 'mish':
return math.tanh(softplus(value)) * value
raise ValueError(f"{type} not supported")
@pytest.mark.parametrize(
"type",
['silu', 'swish', 'hswish', 'mish'])
def test_activations(type):
values = [.5, 10]
inputs = tf.constant(values)
outputs = activation_fn(inputs, type)
_expected = partial(expected_result, type=type)
assert np.allclose(outputs.numpy(), list(map(lambda x: _expected(x), values)))
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/tests/L0/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""Data loader and processing test cases."""
import glob
import hashlib
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from nvidia_tao_tf2.cv.core import tf_example_decoder
from nvidia_tao_tf2.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf2.cv.efficientdet.model import anchors
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
def int64_feature(value):
"""int64_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
"""int64_list_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
"""bytes_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
"""bytes_list_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
"""float_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
"""float_list_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def test_dataloader(tmpdir):
tf.random.set_seed(42)
# generate dummy tfrecord
image_height = 512
image_width = 512
filename = "dummy_example.jpg"
image_id = 1
full_path = os.path.join(tmpdir, filename)
# save dummy image to file
dummy_array = np.zeros((image_height, image_width, 3), dtype=np.uint8)
Image.fromarray(dummy_array, 'RGB').save(full_path)
with open(full_path, 'rb') as fid:
encoded_jpg = fid.read()
# encoded_jpg_b = bytearray(encoded_jpg)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = [0.25]
xmax = [0.5]
ymin = [0.25]
ymax = [0.5]
is_crowd = [False]
category_names = [b'void']
category_ids = [0]
area = [16384]
feature_dict = {
'image/height':
int64_feature(image_height),
'image/width':
int64_feature(image_width),
'image/filename':
bytes_feature(filename.encode('utf8')),
'image/source_id':
bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
bytes_feature(key.encode('utf8')),
'image/encoded':
bytes_feature(encoded_jpg),
'image/caption':
bytes_list_feature([]),
'image/format':
bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
float_list_feature(xmin),
'image/object/bbox/xmax':
float_list_feature(xmax),
'image/object/bbox/ymin':
float_list_feature(ymin),
'image/object/bbox/ymax':
float_list_feature(ymax),
'image/object/class/text':
bytes_list_feature(category_names),
'image/object/class/label':
int64_list_feature(category_ids),
'image/object/is_crowd':
int64_list_feature(is_crowd),
'image/object/area':
float_list_feature(area),
}
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
# dump tfrecords
tfrecords_dir = tmpdir.mkdir("tfrecords")
dummy_tfrecords = str(tfrecords_dir.join('/dummy-001'))
writer = tf.io.TFRecordWriter(str(dummy_tfrecords))
writer.write(example.SerializeToString())
writer.close()
params = hparams_config.get_detection_config('efficientdet-d0').as_dict()
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=params['regenerate_source_id'],
include_image=True)
tfrecord_path = os.path.join(tfrecords_dir, "dummy*")
dataset = tf.data.TFRecordDataset(glob.glob(tfrecord_path))
value = next(iter(dataset))
reader = dataloader.CocoDataset(
tfrecord_path, is_training=True,
use_fake_data=False,
max_instances_per_image=100)
result = reader.dataset_parser(
value, example_decoder, anchor_labeler, params)
assert np.allclose(result[0][0, 0, :], [-2.1651785, -2.0357141, -1.8124998])
assert len(result) == 11
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/tests/L0/test_dataloader.py |
"""Model utils."""
import contextlib
from typing import Text, Tuple, Union
import numpy as np
import tensorflow as tf
# pylint: disable=logging-format-interpolation
def num_params(model):
"""Return number of parameters."""
return np.sum(
[np.prod(v.get_shape().as_list()) for v in model.trainable_variables])
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
    # Unlike the conventional approach of multiplying by survival_prob at test time,
    # here we divide by survival_prob at training time, so no additional compute is
    # needed at test time.
output = inputs / survival_prob * binary_tensor
return output
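# Illustrative sketch of the behaviour above (values are examples only): with
# survival_prob=0.8, roughly 80% of samples in the batch get binary_tensor=1 and are
# scaled by 1/0.8=1.25, while the remaining ~20% have their conv output zeroed, e.g.
#   out = drop_connect(inputs, is_training=True, survival_prob=0.8)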
def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]):
"""Parse the image size and return (height, width).
Args:
        image_size: An integer, a tuple (H, W), or a string with WxH format.
Returns:
A tuple of integer (height, width).
"""
if isinstance(image_size, int):
# image_size is integer, with the same width and height.
return (image_size, image_size)
if isinstance(image_size, str):
# image_size is a string with format WxH
width, height = image_size.lower().split('x')
return (int(height), int(width))
if isinstance(image_size, tuple):
return image_size
raise ValueError(f'image_size must be an int, WxH string, or (height, width) tuple. Was {image_size}')
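# Illustrative behaviour (sketch):
#   parse_image_size(512)          -> (512, 512)
#   parse_image_size('640x480')    -> (480, 640)   # string is parsed as width x height
#   parse_image_size((480, 640))   -> (480, 640)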
def get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]],
max_level: int):
"""Get feat widths and heights for all levels.
Args:
        image_size: An integer, a tuple (H, W), or a string with WxH format.
max_level: maximum feature level.
Returns:
feat_sizes: a list of tuples (height, width) for each level.
"""
image_size = parse_image_size(image_size)
feat_sizes = [{'height': image_size[0], 'width': image_size[1]}]
feat_size = image_size
for _ in range(1, max_level + 1):
feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1)
feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]})
return feat_sizes
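# For example (sketch), get_feat_sizes(512, 7) yields per-level sizes
# 512, 256, 128, 64, 32, 16, 8, 4 for levels 0..7, so
#   get_feat_sizes(512, 7)[3] == {'height': 64, 'width': 64}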
def verify_feats_size(feats,
feat_sizes,
min_level,
max_level,
data_format='channels_last'):
"""Verify the feature map sizes."""
expected_output_size = feat_sizes[min_level:max_level + 1]
for cnt, size in enumerate(expected_output_size):
h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2)
if feats[cnt].shape[h_id] != size['height']:
raise ValueError(
f"feats[{cnt}] has shape {feats[cnt].shape} but its height should be {size['height']}. "
"(input_height: {feat_sizes[0]['height']}, min_level: {min_level}, max_level: {max_level}.)")
if feats[cnt].shape[w_id] != size['width']:
raise ValueError(
f"feats[{cnt}] has shape {feats[cnt].shape} but its width should be {size['width']}."
"(input_width: {feat_sizes[0]['width']}, min_level: {min_level}, max_level: {max_level}.)")
@contextlib.contextmanager
def float16_scope():
"""Scope class for float16."""
def _custom_getter(getter, *args, **kwargs):
"""Returns a custom getter that methods must be called under."""
cast_to_float16 = False
requested_dtype = kwargs['dtype']
if requested_dtype == tf.float16:
kwargs['dtype'] = tf.float32
cast_to_float16 = True
var = getter(*args, **kwargs)
if cast_to_float16:
var = tf.cast(var, tf.float16)
return var
with tf.variable_scope('', custom_getter=_custom_getter) as varscope:
yield varscope
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/model_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A few predefined label id mapping."""
import tensorflow as tf
import yaml
coco = {
# 0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush',
}
voc = {
# 0: 'background',
1: 'aeroplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'diningtable',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'pottedplant',
17: 'sheep',
18: 'sofa',
19: 'train',
20: 'tvmonitor',
}
waymo = {
# 0: 'background',
1: 'vehicle',
2: 'pedestrian',
3: 'cyclist',
}
def get_label_map(mapping):
"""Get label id map based on the name, filename, or dict."""
# case 1: if it is None or dict, just return it.
if not mapping or isinstance(mapping, dict):
return mapping
# case 2: if it is a yaml file, load it to a dict and return the dict.
assert isinstance(mapping, str), 'mapping must be dict or str.'
if mapping.endswith('.yaml'):
with tf.io.gfile.GFile(mapping) as f:
return yaml.load(f, Loader=yaml.FullLoader)
# case 3: it is a name of a predefined dataset.
return {'coco': coco, 'voc': voc, 'waymo': waymo}[mapping]
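# Illustrative usage (sketch; 'my_labels.yaml' is a hypothetical file):
#   get_label_map('coco')             -> predefined COCO id-to-name dict above
#   get_label_map({1: 'person'})      -> returned unchanged
#   get_label_map('my_labels.yaml')   -> dict loaded from the YAML file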
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/label_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Horovod utils."""
import logging
import os
import multiprocessing
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from nvidia_tao_tf2.common.utils import set_random_seed
logger = logging.getLogger(__name__)
def get_rank():
"""Get rank."""
try:
return hvd.rank()
except Exception:
return 0
def get_world_size():
"""Get world size."""
try:
return hvd.size()
except Exception:
return 1
def is_main_process():
"""Check if the current process is rank 0."""
return get_rank() == 0
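# Typical usage sketch: guard rank-0-only side effects such as logging or
# checkpoint/file writes, e.g.
#   if is_main_process():
#       model.save(output_path)   # output_path is a hypothetical variable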
def initialize(cfg, logger, training=True):
"""Initialize training."""
logger.setLevel(logging.INFO)
hvd.init()
use_xla = False
if training:
os.environ['TF_NUM_INTRAOP_THREADS'] = '1'
os.environ['TF_NUM_INTEROP_THREADS'] = str(max(2, (multiprocessing.cpu_count() // hvd.size()) - 2))
if use_xla:
        # it turns out tf_xla_enable_lazy_compilation is used before importing tensorflow for the first time,
# so setting this flag in the current function would have no effect. Thus, this flag is already
# set in Dockerfile. The remaining XLA flags are set here.
TF_XLA_FLAGS = os.environ['TF_XLA_FLAGS'] # contains tf_xla_enable_lazy_compilation
os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1"
os.environ['TF_EXTRA_PTXAS_OPTIONS'] = "-sw200428197=true"
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(True)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
assert tf.config.experimental.get_memory_growth(gpu)
tf.config.experimental.set_visible_devices(gpus, 'GPU')
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
if training:
set_random_seed(cfg.train.random_seed + hvd.rank())
if cfg.train.amp and not cfg.train.qat:
policy = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(policy)
else:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
if is_main_process():
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/horovod_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet utils module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import numpy as np
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
def diou_nms(dets, iou_thresh=None):
"""DIOU non-maximum suppression.
    diou = iou - square of euclidean distance of box centers
/ square of diagonal of smallest enclosing bounding box
Reference: https://arxiv.org/pdf/1911.08287.pdf
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
center_x = (x1 + x2) / 2
center_y = (y1 + y2) / 2
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[order[1:]] - intersection)
smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])
square_of_the_diagonal = (
(smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
(smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)
square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
(center_y[i] - center_y[order[1:]])**2)
# Add 1e-10 for numerical stability.
diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)
inds = np.where(diou <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def hard_nms(dets, iou_thresh=None):
"""The basic hard non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def soft_nms(dets, nms_configs):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain the following members
* method: one of {`linear`, `gaussian`, 'hard'}. Use `gaussian` if None.
* iou_thresh (float): IOU threshold, only for `linear`, `hard`.
* sigma: Gaussian parameter, only for method 'gaussian'.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = nms_configs['method']
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = nms_configs['sigma'] or 0.5
iou_thresh = nms_configs['iou_thresh'] or 0.3
score_thresh = nms_configs['score_thresh'] or 0.001
x1 = np.float32(dets[:, 0])
y1 = np.float32(dets[:, 1])
x2 = np.float32(dets[:, 2])
y2 = np.float32(dets[:, 3])
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
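# Usage sketch with illustrative values only (not a reference result):
#   dets = np.array([[0., 0., 10., 10., 0.9],
#                    [1., 1., 11., 11., 0.8]])
#   kept = soft_nms(dets, {'method': 'gaussian', 'sigma': 0.5,
#                          'iou_thresh': None, 'score_thresh': 0.001})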
def nms(dets, nms_configs):
"""Non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain parameters.
Returns:
numpy.array: Retained boxes.
"""
nms_configs = nms_configs or {}
method = nms_configs['method']
if method == 'hard' or not method:
return hard_nms(dets, nms_configs['iou_thresh'])
if method == 'diou':
return diou_nms(dets, nms_configs['iou_thresh'])
if method in ('linear', 'gaussian'):
return soft_nms(dets, nms_configs)
raise ValueError(f'Unknown NMS method: {method}')
def per_class_nms(boxes, scores, classes, image_id, image_scale, num_classes,
max_boxes_to_draw, nms_configs):
"""Perform per class nms."""
boxes = boxes[:, [1, 0, 3, 2]]
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detections_cls = nms(all_detections_cls, nms_configs)
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detections_cls)),
top_detections_cls,
np.repeat(c + 1, len(top_detections_cls)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
        # keep only the top max_boxes_to_draw detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
        # Add dummy detections to pad up to max_boxes_to_draw detections
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/nms_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of helper functions."""
import os
import json
import tensorflow as tf
import tempfile
import zipfile
from eff.core import Archive, File
from eff.callbacks import BinaryContentCallback
from tensorflow_quantization.custom_qdq_cases import EfficientNetQDQCase
from tensorflow_quantization.quantize import quantize_model
from nvidia_tao_tf2.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf2.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf2.cv.efficientdet.utils import keras_utils
CUSTOM_OBJS = {
'ImageResizeLayer': ImageResizeLayer,
'WeightedFusion': WeightedFusion,
}
def fetch_optimizer(model, opt_type) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
# this is the case where our target optimizer is not wrapped by any other optimizer(s)
if isinstance(model.optimizer, opt_type):
return model.optimizer
# Dive into nested optimizer object until we reach the target opt
opt = model.optimizer
while hasattr(opt, '_optimizer'):
opt = opt._optimizer
if isinstance(opt, opt_type):
return opt
raise TypeError(f'Failed to find {opt_type} in the nested optimizer object')
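# Usage sketch (assumes AMP wraps the base optimizer in a LossScaleOptimizer):
#   base = tf.keras.optimizers.SGD(momentum=0.9)
#   model.compile(optimizer=tf.keras.mixed_precision.LossScaleOptimizer(base))
#   sgd = fetch_optimizer(model, tf.keras.optimizers.SGD)   # unwraps back to `base`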
def decode_eff(eff_model_path, enc_key=None):
"""Decode EFF to saved_model directory.
Args:
eff_model_path (str): Path to eff model
enc_key (str, optional): Encryption key. Defaults to None.
Returns:
        tuple: Path to the extracted checkpoint directory and the checkpoint name (or None).
"""
# Decrypt EFF
eff_filename = os.path.basename(eff_model_path)
eff_art = Archive.restore_artifact(
restore_path=eff_model_path,
artifact_name=eff_filename,
passphrase=enc_key)
zip_path = eff_art.get_handle()
# Unzip
ckpt_path = os.path.dirname(zip_path)
# TODO(@yuw): try catch?
with zipfile.ZipFile(zip_path, "r") as zip_file:
zip_file.extractall(ckpt_path)
extracted_files = os.listdir(ckpt_path)
ckpt_name = None
for f in extracted_files:
if 'ckpt' in f:
ckpt_name = f.split('.')[0]
# TODO(@yuw): backbone ckpt vs effdet vs failed case
# if not ckpt_name:
# raise IOError(f"{eff_model_path} was not saved properly.")
return ckpt_path, ckpt_name
def load_model(eff_model_path, hparams, mode='train', is_qat=False):
"""Load hdf5 or EFF model.
Args:
        eff_model_path (str): Path to the EfficientDet EFF checkpoint.
        hparams: Config carrying `encryption_key` and the moving average decay settings.
        mode (str, optional): Which graph to load, 'train' or 'eval'. Defaults to 'train'.
        is_qat (bool, optional): Whether to add QAT quantize nodes. Defaults to False.
Returns:
Keras model: Loaded model
"""
ckpt_path, ckpt_name = decode_eff(eff_model_path, hparams.encryption_key)
if mode != 'train':
mode = 'eval'
model = load_json_model(
os.path.join(ckpt_path, f'{mode}_graph.json'))
if is_qat:
model = quantize_model(model, custom_qdq_cases=[EfficientNetQDQCase()])
keras_utils.restore_ckpt(
model,
os.path.join(ckpt_path, ckpt_name),
hparams.get('moving_average_decay', None) or hparams.train.moving_average_decay,
steps_per_epoch=0,
expect_partial=True)
return model
def load_json_model(json_path, new_objs=None):
"""Helper function to load keras model from json file."""
new_objs = new_objs or {}
with open(json_path, 'r', encoding='utf-8') as jf:
model_json = jf.read()
loaded_model = tf.keras.models.model_from_json(
model_json,
custom_objects={**CUSTOM_OBJS, **new_objs})
return loaded_model
def dump_json(model, out_path):
"""Model to json."""
json_str = model.to_json()
model_json = json.loads(json_str)
# workaround to remove float16 dtype if trained with AMP
for layer in model_json['config']['layers']:
if isinstance(layer['config']['dtype'], dict):
layer['config']['dtype'] = 'float32'
with open(out_path, "w", encoding='utf-8') as jf:
json.dump(model_json, jf)
def dump_eval_json(graph_dir, train_graph="train_graph.json", eval_graph='eval_graph.json'):
"""Generate and save the evaluation graph by modifying train graph.
Args:
graph_dir (str): Directory where the train graph resides in.
"""
# generate eval graph for exporting. (time saving hack)
with open(os.path.join(graph_dir, train_graph), 'r', encoding='utf-8') as f:
pruned_json = json.load(f)
for layer in pruned_json['config']['layers']:
if layer['class_name'] == 'BatchNormalization':
if layer['inbound_nodes'][0][0][-1]:
layer['inbound_nodes'][0][0][-1]['training'] = False
with open(os.path.join(graph_dir, eval_graph), 'w', encoding='utf-8') as jf:
json.dump(pruned_json, jf)
def zipdir(src, zip_path):
"""Function creates zip archive from src in dst location.
Args:
src: Path to directory to be archived.
        zip_path: Path where the archived zip file will be stored.
"""
# destination directory
os.chdir(os.path.dirname(zip_path))
# zipfile handler
with zipfile.ZipFile(zip_path, "w") as zf:
# writing content of src directory to the archive
for root, _, filenames in os.walk(src):
for filename in filenames:
zf.write(
os.path.join(root, filename),
arcname=os.path.join(root.replace(src, ""), filename))
def encode_eff(filepath, eff_model_path, enc_key, is_pruned=False):
"""Encode saved_model directory into a .tlt file.
Args:
filepath (str): Path to saved_model
eff_model_path (str): Path to the output EFF file
enc_key (str): Encryption key
is_pruned (bool, optional): Whether the model has been pruned. Defaults to False.
"""
# always overwrite
if os.path.exists(eff_model_path):
os.remove(eff_model_path)
os_handle, temp_zip_file = tempfile.mkstemp()
os.close(os_handle)
# create zipfile from saved_model directory
zipdir(filepath, temp_zip_file)
# create artifacts from zipfile
eff_filename = os.path.basename(eff_model_path)
zip_art = File(
name=eff_filename,
is_pruned=is_pruned,
description="Artifact from checkpoint",
filepath=temp_zip_file,
encryption=bool(enc_key),
content_callback=BinaryContentCallback,
)
Archive.save_artifact(
save_path=eff_model_path, artifact=zip_art, passphrase=enc_key)
return temp_zip_file
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/helper.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hparams for model architecture and trainer."""
import ast
import collections
import copy
from typing import Any, Dict, Text
import six
import tensorflow as tf
import yaml
def eval_str_fn(val):
"""Eval str."""
if val in {'true', 'false'}:
return val == 'true'
try:
return ast.literal_eval(val)
except (ValueError, SyntaxError):
return val
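# Illustrative behavior of eval_str_fn (a sketch of expected return values):
#   eval_str_fn('true')   -> True
#   eval_str_fn('1e-3')   -> 0.001
#   eval_str_fn('(1, 2)') -> (1, 2)
#   eval_str_fn('hello')  -> 'hello'  (falls back to the raw string)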
# pylint: disable=protected-access
class Config(object):
"""A config utility class."""
def __init__(self, config_dict=None):
"""Init."""
self.update(config_dict)
def __setattr__(self, k, v):
"""Set attr."""
self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)
def __getattr__(self, k):
"""Get attr."""
return self.__dict__[k]
def __getitem__(self, k):
"""Get item."""
return self.__dict__[k]
def __repr__(self):
"""repr."""
return repr(self.as_dict())
def __str__(self):
"""str."""
try:
return yaml.dump(self.as_dict(), indent=4)
except TypeError:
return str(self.as_dict())
def _update(self, config_dict, allow_new_keys=True):
"""Recursively update internal members."""
if not config_dict:
return
for k, v in six.iteritems(config_dict):
if k not in self.__dict__:
if allow_new_keys:
self.__setattr__(k, v) # noqa pylint: disable=C2801
else:
raise KeyError(f'Key `{k}` does not exist for overriding.')
else:
if isinstance(self.__dict__[k], Config) and isinstance(v, dict):
self.__dict__[k]._update(v, allow_new_keys)
elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):
self.__dict__[k]._update(v.as_dict(), allow_new_keys)
else:
self.__setattr__(k, v) # noqa pylint: disable=C2801
def get(self, k, default_value=None):
"""Get value."""
return self.__dict__.get(k, default_value)
def update(self, config_dict):
"""Update members while allowing new keys."""
self._update(config_dict, allow_new_keys=True)
def keys(self):
"""Return all keys."""
return self.__dict__.keys()
def override(self, config_dict_or_str, allow_new_keys=False):
"""Update members while disallowing new keys."""
if isinstance(config_dict_or_str, str):
if not config_dict_or_str:
return
if '=' in config_dict_or_str:
config_dict = self.parse_from_str(config_dict_or_str)
elif config_dict_or_str.endswith('.yaml'):
config_dict = self.parse_from_yaml(config_dict_or_str)
else:
raise ValueError(
f'Invalid string {config_dict_or_str}, must end with .yaml or contains "=".')
elif isinstance(config_dict_or_str, dict):
config_dict = config_dict_or_str
else:
raise ValueError(f'Unknown value type: {config_dict_or_str}')
self._update(config_dict, allow_new_keys)
def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
"""Parses a yaml file and returns a dictionary."""
with tf.io.gfile.GFile(yaml_file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def save_to_yaml(self, yaml_file_path):
"""Write a dictionary into a yaml file."""
with tf.io.gfile.GFile(yaml_file_path, 'w') as f:
yaml.dump(self.as_dict(), f, default_flow_style=False)
def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
"""Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}."""
if not config_str:
return {}
def add_kv_recursive(k, v):
"""Recursively parse x.y.z=tt to {x: {y: {z: tt}}}."""
if '.' not in k:
if '*' in v:
# we reserve * to split arrays.
return {k: [eval_str_fn(vv) for vv in v.split('*')]}
return {k: eval_str_fn(v)}
pos = k.index('.')
return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}
def merge_dict_recursive(target, src):
"""Recursively merge two nested dictionary."""
for k in src.keys():
if ((k in target and isinstance(target[k], dict) and
isinstance(src[k], collections.abc.Mapping))):
merge_dict_recursive(target[k], src[k])
else:
target[k] = src[k]
config_dict = {}
try:
for kv_pair in config_str.split(','):
if not kv_pair: # skip empty string
continue
key_str, value_str = kv_pair.split('=')
key_str = key_str.strip()
merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))
return config_dict
except ValueError as e:
raise ValueError(f'Invalid config_str: {config_str}') from e
def as_dict(self):
"""Returns a dict representation."""
config_dict = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, Config):
config_dict[k] = v.as_dict()
else:
config_dict[k] = copy.deepcopy(v)
return config_dict
# pylint: enable=protected-access
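# Illustrative usage of Config (a minimal sketch; keys and values are made up):
#   c = Config({'x': {'y': 0, 'z': 0}, 'name': 'demo'})
#   c.override('x.y=1,x.z=2')        # '=' string form goes through parse_from_str
#   assert c.x.y == 1 and c.x.z == 2
#   c.override({'name': 'demo2'})    # dict form; unknown keys raise KeyError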
def default_detection_configs():
"""Returns a default detection configs."""
h = Config()
# model name.
h.name = 'efficientdet-d0'
h.model_name = 'efficientdet-d0'
# activation type: see activation_fn in model/activation_builder.py.
h.act_type = 'swish'
# input preprocessing parameters
h.image_size = 640 # An integer or a string WxH such as 640x320.
h.target_size = None
h.input_rand_hflip = True
h.jitter_min = 0.1
h.jitter_max = 2.0
h.auto_augment = False
h.auto_color = False
h.auto_translate_xy = False
h.grid_mask = False
h.use_augmix = False
# mixture_width, mixture_depth, alpha
h.augmix_params = [3, -1, 1]
h.sample_image = None
h.shuffle_buffer = 10000
# dataset specific parameters
h.num_classes = 91
h.seg_num_classes = 3 # segmentation classes
h.heads = ['object_detection'] # 'object_detection', 'segmentation'
h.skip_crowd_during_training = True
h.label_map = None # a dict or a string of 'coco', 'voc', 'waymo'.
h.max_instances_per_image = 100 # Default to 100 for COCO.
h.regenerate_source_id = False
# model architecture
h.min_level = 3
h.max_level = 7
h.num_scales = 3
h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
h.anchor_scale = 4.0
# is batchnorm training mode
h.is_training_bn = True
# optimization
h.momentum = 0.9
h.optimizer = 'sgd'
h.learning_rate = 0.08 # 0.008 for adam.
h.lr_warmup_init = 0.008 # 0.0008 for adam.
h.lr_warmup_epoch = 1.0
h.clip_gradients_norm = 10.0
h.num_epochs = 300
h.data_format = 'channels_last'
# classification loss
h.label_smoothing = 0.0 # 0.1 is a good default
# focal loss parameters
h.alpha = 0.25
h.gamma = 1.5
# localization loss
h.delta = 0.1 # delta of the huber loss: the threshold between quadratic and linear behavior.
# total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight
h.box_loss_weight = 50.0
h.iou_loss_type = None
h.iou_loss_weight = 1.0
# regularization l2 loss.
h.l2_weight_decay = 4e-5
h.l1_weight_decay = 0.0
h.mixed_precision = False # If False, use float32.
h.mixed_precision_on_inputs = False
h.loss_scale = 2**15
# For detection.
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_num_filters = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_act_pattern = False
h.drop_remainder = True # drop remainder for the final batch eval.
# For post-processing nms, must be a dict.
h.nms_configs = {
'method': 'gaussian',
'iou_thresh': None, # use the default value based on method.
'score_thresh': 0.,
'sigma': None,
'pyfunc': True,
'max_nms_inputs': 5000,
'max_output_size': 100,
}
# version.
h.fpn_name = None
h.fpn_weight_method = None
h.fpn_config = None
# No stochastic depth in default.
h.survival_prob = None
h.img_summary_steps = None
h.lr_decay_method = 'cosine'
h.moving_average_decay = 0.9998
h.ckpt_var_scope = None # ckpt variable scope.
# If true, skip loading pretrained weights if shape mismatches.
h.skip_mismatch = True
h.backbone_name = 'efficientnet-b1'
h.backbone_config = None
h.backbone_init = None
h.var_freeze_expr = None
# A temporary flag to switch between legacy and keras models.
h.use_keras_model = True
h.dataset_type = None
h.positives_momentum = None
h.grad_checkpoint = False
# experimental
h.set_num_threads = 1
h.use_xla = False
h.seed = 42
h.results_dir = None
h.freeze_blocks = None
h.freeze_bn = False
h.encryption_key = None
h.qat = False
return h
efficientdet_model_param_dict = {
'resdet18':
dict( # noqa pylint: disable=R1735
name='resdet18',
backbone_name='resnet18',
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
),
'resdet34':
dict( # noqa pylint: disable=R1735
name='resdet34',
backbone_name='resnet34',
image_size=512,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
),
'efficientdet-d0':
dict( # noqa pylint: disable=R1735
name='efficientdet-d0',
backbone_name='efficientnet-b0',
image_size=512,
fpn_num_filters=64,
fpn_cell_repeats=3,
box_class_repeats=3,
),
'efficientdet-d1':
dict( # noqa pylint: disable=R1735
name='efficientdet-d1',
backbone_name='efficientnet-b1',
image_size=640,
fpn_num_filters=88,
fpn_cell_repeats=4,
box_class_repeats=3,
),
'efficientdet-d2':
dict( # noqa pylint: disable=R1735
name='efficientdet-d2',
backbone_name='efficientnet-b2',
image_size=768,
fpn_num_filters=112,
fpn_cell_repeats=5,
box_class_repeats=3,
),
'efficientdet-d3':
dict( # noqa pylint: disable=R1735
name='efficientdet-d3',
backbone_name='efficientnet-b3',
image_size=896,
fpn_num_filters=160,
fpn_cell_repeats=6,
box_class_repeats=4,
),
'efficientdet-d4':
dict( # noqa pylint: disable=R1735
name='efficientdet-d4',
backbone_name='efficientnet-b4',
image_size=1024,
fpn_num_filters=224,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d5':
dict( # noqa pylint: disable=R1735
name='efficientdet-d5',
backbone_name='efficientnet-b5',
image_size=1280,
fpn_num_filters=288,
fpn_cell_repeats=7,
box_class_repeats=4,
),
'efficientdet-d6':
dict( # noqa pylint: disable=R1735
name='efficientdet-d6',
backbone_name='efficientnet-b6',
image_size=1280,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
'efficientdet-d7':
dict( # noqa pylint: disable=R1735
name='efficientdet-d7',
backbone_name='efficientnet-b6',
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
'efficientdet-d7x':
dict( # noqa pylint: disable=R1735
name='efficientdet-d7x',
backbone_name='efficientnet-b7',
image_size=1536,
fpn_num_filters=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=4.0,
max_level=8,
fpn_weight_method='sum', # Use unweighted sum for stability.
),
}
def get_efficientdet_config(model_name='efficientdet-d1'):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_configs()
if model_name in efficientdet_model_param_dict:
h.override(efficientdet_model_param_dict[model_name])
else:
raise ValueError(f'Unknown model name: {model_name}')
return h
def get_detection_config(model_name):
"""Get detection config."""
if model_name.startswith('efficientdet') or model_name.startswith('resdet'):
return get_efficientdet_config(model_name)
raise ValueError('model name must start with efficientdet or resdet.')
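# Illustrative usage (a sketch; the model name must be a key of efficientdet_model_param_dict):
#   config = get_detection_config('efficientdet-d0')
#   config.override('num_classes=91,image_size=512')
#   print(config.fpn_num_filters)  # 64 for efficientdet-d0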
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/hparams_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""COCO-style evaluation metrics."""
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import horovod.tensorflow.keras as hvd
class EvaluationMetric():
"""COCO evaluation metric class.
This class cannot inherit from tf.keras.metrics.Metric because it relies on numpy and pycocotools rather than TensorFlow ops.
"""
def __init__(self, filename=None, testdev_dir=None, label_map=None):
"""Constructs COCO evaluation class.
The class provides the interface to metrics_fn in TPUEstimator. The
_update_op() takes detections from each image and push them to
self.detections. The _evaluate() loads a JSON file in COCO annotation format
as the groundtruth and runs COCO evaluation.
Args:
filename: Ground truth JSON file name. If filename is None, use
groundtruth data passed from the dataloader for evaluation. filename is
ignored if testdev_dir is not None.
testdev_dir: folder name for testdev data. If None, run eval without
groundtruth, and filename will be ignored.
label_map: a dict from id to class name. Used for per-class AP.
"""
self.label_map = label_map
self.filename = filename
self.testdev_dir = testdev_dir
self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
self.reset_states()
def reset_states(self):
"""Reset COCO API object."""
self.detections = []
self.dataset = {
'images': [],
'annotations': [],
'categories': []
}
self.image_id = 1
self.annotation_id = 1
self.category_ids = []
self.metric_values = None
def evaluate(self):
"""Evaluates with detections from all images with COCO API.
Returns:
coco_metric: float numpy array with shape [12] representing the
coco-style evaluation metrics.
"""
if self.filename:
coco_gt = COCO(self.filename)
else:
coco_gt = COCO()
coco_gt.dataset = self.dataset
coco_gt.createIndex()
# Run on validation dataset.
detections = np.array(self.detections)
image_ids = list(set(detections[:, 0]))
coco_dt = coco_gt.loadRes(detections)
coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
if self.label_map:
# Get per_class AP, see pycocotools/cocoeval.py:334
# TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
# Use areaRng_id=0 ('all') and maxDets_id=-1 (200) in default
precision = coco_eval.eval['precision'][:, :, :, 0, -1]
# Ideally, label_map should match the eval set, but it is possible that
# some classes have no data in the eval set.
ap_perclass = [0] * max(precision.shape[-1], len(self.label_map))
for c in range(precision.shape[-1]): # iterate over all classes
precision_c = precision[:, :, c]
# Only consider values if > -1.
precision_c = precision_c[precision_c > -1]
ap_c = np.mean(precision_c) if precision_c.size else -1.
ap_perclass[c] = ap_c
coco_metrics = np.concatenate((coco_metrics, ap_perclass))
# Return the concat normal and per-class AP.
return np.array(coco_metrics, dtype=np.float32)
def result(self):
"""Return the metric values (and compute it if needed)."""
if self.metric_values is None:
self.metric_values = self.evaluate()
return self.metric_values
def update_state(self, groundtruth_data, detections):
"""Update detection results and groundtruth data.
Append detection results to self.detections to aggregate results from
all validation set. The groundtruth_data is parsed and added into a
dictionary with the same format as COCO dataset, which can be used for
evaluation.
Args:
groundtruth_data: Groundtruth annotations in a tensor with each row
representing [y1, x1, y2, x2, is_crowd, area, class].
detections: Detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class].
"""
for i, det in enumerate(detections):
# Filter out detections with predicted class label = -1.
indices = np.where(det[:, -1] > -1)[0]
det = det[indices]
if det.shape[0] == 0:
continue
# Append groundtruth annotations to create COCO dataset object.
# Add images.
image_id = det[0, 0]
if image_id == -1:
image_id = self.image_id
det[:, 0] = image_id
self.detections.extend(det)
if not self.filename and not self.testdev_dir:
# Process groundtruth data only if filename is empty and there is no test_dev.
self.dataset['images'].append({
'id': int(image_id),
})
# Add annotations.
indices = np.where(groundtruth_data[i, :, -1] > -1)[0]
for data in groundtruth_data[i, indices]:
box = data[0:4]
is_crowd = data[4]
area = (box[3] - box[1]) * (box[2] - box[0])
category_id = data[6]
if category_id < 0:
break
self.dataset['annotations'].append({
'id': int(self.annotation_id),
'image_id': int(image_id),
'category_id': int(category_id),
'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],
'area': area,
'iscrowd': int(is_crowd)
})
self.annotation_id += 1
self.category_ids.append(category_id)
self.image_id += 1
if not self.filename:
self.category_ids = list(set(self.category_ids))
self.dataset['categories'] = [
{'id': int(category_id)} for category_id in self.category_ids
]
def gather(self):
"""Gather."""
self.detections = hvd.allgather(self.detections)
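# Illustrative evaluation loop (a sketch; `gt_json`, `labels` and `dets` are assumed to
# come from the EfficientDet eval pipeline, with dets rows of
# [image_id, x, y, width, height, score, class]):
#   metric = EvaluationMetric(filename=gt_json)
#   for labels, dets in batches:
#       metric.update_state(labels, dets)
#   stats = metric.result()  # 12 COCO metrics, plus per-class AP when label_map is set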
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/coco_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for processing config file to run EfficientDet pipelines."""
import six
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
def generate_params_from_cfg(default_hparams, cfg, mode):
"""Generate parameters from experient cfg."""
spec_checker(cfg)
if cfg['model']['aspect_ratios']:
aspect_ratios = eval_str(cfg['model']['aspect_ratios'])
if not isinstance(aspect_ratios, list):
raise SyntaxError("aspect_ratios should be a list of tuples.")
else:
aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
return dict(
default_hparams.as_dict(),
# model
name=cfg['model']['name'],
model_name=cfg['model']['name'],
aspect_ratios=aspect_ratios,
anchor_scale=cfg['model']['anchor_scale'] or 4,
min_level=cfg['model']['min_level'] or 3,
max_level=cfg['model']['max_level'] or 7,
num_scales=cfg['model']['num_scales'] or 3,
freeze_bn=cfg['model']['freeze_bn'],
freeze_blocks=cfg['model']['freeze_blocks']
if cfg['model']['freeze_blocks'] else None,
image_size=(cfg['model']['input_height'], cfg['model']['input_width']),
# data config
val_json_file=cfg['dataset']['val_json_file'],
num_classes=cfg['dataset']['num_classes'],
max_instances_per_image=cfg['dataset']['max_instances_per_image'] or 100,
skip_crowd_during_training=cfg['dataset']['skip_crowd_during_training'],
# Loader config
shuffle_file=cfg['dataset']['loader']['shuffle_file'],
shuffle_buffer=cfg['dataset']['loader']['shuffle_buffer'] or 1024,
cycle_length=cfg['dataset']['loader']['cycle_length'] or 32,
block_length=cfg['dataset']['loader']['block_length'] or 16,
prefetch_size=cfg['dataset']['loader']['prefetch_size'] or 2, # set to 0 for AUTOTUNE
# augmentation config
input_rand_hflip=cfg['dataset']['augmentation']['rand_hflip'],
jitter_min=cfg['dataset']['augmentation']['random_crop_min_scale'] or 0.1,
jitter_max=cfg['dataset']['augmentation']['random_crop_max_scale'] or 2.0,
auto_color=cfg['dataset']['augmentation']['auto_color_distortion'],
auto_translate_xy=cfg['dataset']['augmentation']['auto_translate_xy'],
auto_augment=cfg['dataset']['augmentation']['auto_color_distortion'] or cfg['dataset']['augmentation']['auto_translate_xy'],
# train config
num_examples_per_epoch=cfg['train']['num_examples_per_epoch'],
checkpoint=cfg['train']['checkpoint'],
mode=mode,
is_training_bn=mode == 'train',
checkpoint_interval=cfg['train']['checkpoint_interval'],
train_batch_size=cfg['train']['batch_size'],
seed=cfg['train']['random_seed'] or 42,
pruned_model_path=cfg['train']['pruned_model_path'],
moving_average_decay=cfg['train']['moving_average_decay'],
mixed_precision=cfg['train']['amp'] and not cfg['train']['qat'],
qat=cfg['train']['qat'],
data_format=cfg['data_format'],
l2_weight_decay=cfg['train']['l2_weight_decay'],
l1_weight_decay=cfg['train']['l1_weight_decay'],
clip_gradients_norm=cfg['train']['clip_gradients_norm'] or 5.0,
skip_checkpoint_variables=cfg['train']['skip_checkpoint_variables'],
num_epochs=cfg['train']['num_epochs'],
image_preview=cfg['train']['image_preview'],
init_epoch=cfg['train']['init_epoch'],
# LR config
lr_decay_method=cfg['train']['lr_schedule']['name'],
learning_rate=cfg['train']['lr_schedule']['learning_rate'],
lr_warmup_epoch=cfg['train']['lr_schedule']['warmup_epoch'] or 5,
lr_warmup_init=cfg['train']['lr_schedule']['warmup_init'] or 0.00001,
lr_annealing_epoch=cfg['train']['lr_schedule']['annealing_epoch'] or 10,
# Optimizer config
momentum=cfg['train']['optimizer']['momentum'] or 0.9,
optimizer=cfg['train']['optimizer']['name'] or 'sgd',
# eval config
eval_batch_size=cfg['evaluate']['batch_size'],
eval_samples=cfg['evaluate']['num_samples'],
eval_start=cfg['evaluate']['start_eval_epoch'],
eval_label_map=cfg['evaluate']['label_map'],
eval_sigma=cfg['evaluate']['sigma'],
max_output_size=cfg['evaluate']['max_detections_per_image'],
max_nms_inputs=cfg['evaluate']['max_nms_inputs'],
#
results_dir=cfg['results_dir'],
encryption_key=cfg['encryption_key'],
iou_loss_type=cfg['train']['iou_loss_type'],
box_loss_weight=cfg['train']['box_loss_weight'],
iou_loss_weight=cfg['train']['iou_loss_weight'],
label_smoothing=cfg['train']['label_smoothing'],
)
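# Illustrative usage (a sketch of how this is typically combined with hparams_config;
# `cfg` is the Hydra experiment config):
#   base = hparams_config.get_detection_config(cfg.model.name)
#   base.update(generate_params_from_cfg(base, cfg, mode='train'))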
def spec_checker(cfg):
"""Check if parameters in the spec file are valid.
Args:
cfg: Hydra config.
"""
assert cfg.data_format == 'channels_last', "Only `channels_last` data format is supported."
# training config
assert cfg.train.batch_size > 0, \
"batch size for training must be positive."
assert cfg.train.checkpoint_interval > 0, \
"checkpoint interval must be positive."
assert cfg.train.num_examples_per_epoch > 0, \
"Number of samples must be positive."
assert cfg.train.num_epochs >= \
cfg.train.checkpoint_interval, \
"num_epochs must be positive and no less than checkpoint_interval."
assert 0 <= cfg.train.moving_average_decay < 1, \
"Moving average decay must be within [0, 1)."
assert 0 < cfg.train.lr_schedule.warmup_init < 1, \
"The initial learning rate during warmup must be within (0, 1)."
assert cfg.train.lr_schedule.learning_rate > 0, \
"learning_rate must be positive."
assert cfg.train.num_epochs >= cfg.train.lr_schedule.warmup_epoch >= 0, \
"warmup_epoch must be within [0, num_epochs]."
# model config
assert 'efficientdet-d' in str(cfg.model.name), \
"model name can be chosen from efficientdet-d0 to efficientdet-d5."
assert cfg.model.min_level == 3, "min_level must be 3"
assert cfg.model.max_level == 7, "max_level must be 7"
# eval config
assert cfg.evaluate.num_samples >= cfg.evaluate.batch_size > 0, \
"batch size for evaluation must be (0, num_samples]"
assert cfg.train.num_epochs >= cfg.evaluate.start_eval_epoch >= 0, \
"start_eval_epoch must be within [0, num_epochs]."
# dataset config
assert cfg.dataset.train_tfrecords, \
"train_tfrecords must be specified."
assert cfg.dataset.val_tfrecords, \
"val_tfrecords must be specified."
assert 1 < cfg.dataset.num_classes, \
"num_classes is number of categories + 1 (background). It must be greater than 1."
# augmentation config
assert cfg.dataset.augmentation.random_crop_max_scale >= cfg.dataset.augmentation.random_crop_min_scale > 0, \
"random_crop_min_scale should be positive and no greater than random_crop_max_scale."
assert cfg.prune.equalization_criterion in \
['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
"Equalization criterion are [arithmetic_mean, geometric_mean, union, \
intersection]."
assert cfg.prune.normalizer in ['L2', 'max'], \
"normalizer options are [L2, max]."
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/config_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common keras utils."""
import logging
from typing import Text
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.model import normalization_builder
def build_batch_norm(is_training_bn: bool,
beta_initializer: Text = 'zeros',
gamma_initializer: Text = 'ones',
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = 'tpu_batch_normalization'):
"""Build a batch normalization layer.
Args:
is_training_bn: `bool` for whether the model is training.
beta_initializer: `str`, beta initializer.
gamma_initializer: `str`, gamma initializer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
axis = 1 if data_format == 'channels_first' else -1
batch_norm_class = normalization_builder.batch_norm_class(is_training_bn)
bn_layer = batch_norm_class(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
name=name)
return bn_layer
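# Illustrative usage (a minimal sketch):
#   bn = build_batch_norm(is_training_bn=True, data_format='channels_last', name='bn_demo')
#   y = bn(tf.zeros([2, 8, 8, 16]))  # normalizes over the channel axis (-1)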
def get_ema_vars(model):
"""Get all exponential moving average (ema) variables."""
ema_vars = model.trainable_weights
for v in model.weights:
# We also maintain the moving average for batch norm moving mean and variance.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
ema_vars.append(v)
ema_vars_dict = {}
# Remove duplicate vars
for var in ema_vars:
ema_vars_dict[var.ref()] = var
return ema_vars_dict
def average_name(ema, var):
"""Returns the name of the `Variable` holding the average for `var`.
A workaround for tf2.
Args:
ema: A `ExponentialMovingAverage` object.
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage` class to hold the moving average of `var`.
"""
if var.ref() in ema._averages: # pylint: disable=protected-access
return ema._averages[var.ref()].name.split(':')[0] # pylint: disable=protected-access
return tf.compat.v1.get_default_graph().unique_name(
var.name.split(':')[0] + '/' + ema.name, mark_as_used=False)
def restore_ckpt(model,
ckpt_path_or_file,
ema_decay=0.9998,
steps_per_epoch=0,
skip_mismatch=True,
expect_partial=False):
"""Restore variables from a given checkpoint.
Args:
model: the keras model to be restored.
ckpt_path_or_file: the path or file for checkpoint.
ema_decay: ema decay rate. If None or zero or negative value, disable ema.
steps_per_epoch: number of iterations in each training epoch
skip_mismatch: whether to skip variables if shape mismatch.
expect_partial: suppress warnings when variables mismatch.
"""
if ckpt_path_or_file == '_':
logging.info('Running test: do not load any ckpt.')
return 0
if tf.io.gfile.isdir(ckpt_path_or_file):
ckpt_path_or_file = tf.train.latest_checkpoint(ckpt_path_or_file)
if not ckpt_path_or_file:
return 0
if (tf.train.list_variables(ckpt_path_or_file)[0][0] ==
'_CHECKPOINTABLE_OBJECT_GRAPH'):
if expect_partial:
model.load_weights(ckpt_path_or_file).expect_partial()
else:
model.load_weights(ckpt_path_or_file)
logging.debug('Restored checkpoint with load_weights method!')
else:
if ema_decay > 0:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = get_ema_vars(model)
var_dict = {
average_name(ema, var): var for (ref, var) in ema_vars.items()
}
else:
ema_vars = get_ema_vars(model)
var_dict = {
var.name.split(':')[0]: var for (ref, var) in ema_vars.items()
}
# add variables that not in var_dict
for v in model.weights:
if v.ref() not in ema_vars:
var_dict[v.name.split(':')[0]] = v
# try to load graph-based checkpoint with ema support,
# else load checkpoint via keras.load_weights which doesn't support ema.
for i, (key, var) in enumerate(var_dict.items()):
try:
var.assign(tf.train.load_variable(ckpt_path_or_file, key))
if i < 10:
logging.info('Init %s from %s (%s)', var.name, key, ckpt_path_or_file)
except tf.errors.NotFoundError as e:
if skip_mismatch:
logging.warning('Not found %s in %s', key, ckpt_path_or_file)
else:
raise e
except ValueError as e:
if skip_mismatch:
logging.warning('%s: %s', key, e)
else:
raise e
if steps_per_epoch > 0:
last_iteration = model.optimizer.iterations
ckpt_epoch = last_iteration // steps_per_epoch
logging.debug("Restored checkpoint at epoch: %s", ckpt_epoch)
return ckpt_epoch
return 0
def get_mixed_precision_policy():
"""Get mixed precision policy."""
current_version = tuple(map(int, tf.__version__.split('.')))[:3]
threshold_version = (2, 4, 0) # The threshold tensorflow version is 2.4.0
return tf.keras.mixed_precision.global_policy() if current_version >= threshold_version \
else tf.keras.mixed_precision.experimental.global_policy()
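# Illustrative usage (a sketch):
#   policy = get_mixed_precision_policy()
#   print(policy.compute_dtype, policy.variable_dtype)  # 'float32' by default unless AMP is enabled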
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/utils/keras_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from six.moves import zip
import tensorflow.compat.v1 as tf
from nvidia_tao_tf2.cv.efficientdet.visualize import static_shape
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
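# Illustrative behavior (a sketch; only shapes matter here):
#   t = tf.ones([3, 5])
#   pad_or_clip_nd(t, [4, 2])  # -> shape [4, 2]: rows zero-padded, columns clipped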
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def static_or_dynamic_map_fn(fn, elems, dtype=None, parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
ValueError: if `elems` is not a Tensor or a list of Tensors.
ValueError: if `fn` does not return a Tensor or list of Tensors
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0] or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all(isinstance(output, tf.Tensor) for output in outputs):
return tf.stack(outputs)
if all(isinstance(output, list) for output in outputs):
if all(all(isinstance(entry, tf.Tensor) for entry in output_list)
for output_list in outputs):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
[f'image size must be >= {min_dim} in both height and width.'])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
f"image size must be >= {min_dim} in both height and width; "
"image dim = {image_height},{image_width}")
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError(f'Unequal shapes {shape_a}, {shape_b}')
return tf.no_op()
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError(f'Unequal first dimension {shape_a[0]}, {shape_b[0]}')
return tf.no_op()
return tf.assert_equal(shape_a[0], shape_b[0])
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
"""Asserts the input box tensor is normalized.
Args:
boxes: a tensor of shape [N, 4] where N is the number of boxes.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
Returns:
a tf.Assert op which fails when the input box tensor is not normalized.
Raises:
ValueError: When the input box tensor is not normalized.
"""
box_minimum = tf.reduce_min(boxes)
box_maximum = tf.reduce_max(boxes)
return tf.Assert(
tf.logical_and(
tf.less_equal(box_maximum, maximum_normalized_coordinate),
tf.greater_equal(box_minimum, 0)),
[boxes])
def flatten_dimensions(inputs, first, last):
"""Flattens `K-d` tensor along [first, last) dimensions.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_dimensions(inputs, first=1, last=3)
new_tensor.shape -> [10, 100, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
first: first value for the range of dimensions to flatten.
last: last value for the range of dimensions to flatten. Note that the last
dimension itself is excluded.
Returns:
a tensor with shape
[D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
D(K-1)].
Raises:
ValueError: if first and last arguments are incorrect.
"""
if first >= inputs.shape.ndims or last > inputs.shape.ndims:
raise ValueError("`first` and `last` must be less than inputs.shape.ndims. "
f"found {first} and {last} respectively while ndims is {inputs.shape.ndims}")
shape = combined_static_and_dynamic_shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[first:last], keepdims=True)
new_shape = tf.concat([shape[:first], flattened_dim_prod,
shape[last:]], axis=0)
return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
"""Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
Example:
`inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
new_tensor = flatten_first_n_dimensions(inputs, 2)
new_tensor.shape -> [50, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
n: The number of dimensions to flatten.
Returns:
a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
"""
return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
"""Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.
Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
[dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
Example:
`inputs` is a tensor with shape [50, 20, 20, 3].
new_tensor = expand_first_dimension(inputs, [10, 5]).
new_tensor.shape -> [10, 5, 20, 20, 3].
Args:
inputs: a tensor with shape [D0, D1, ..., D(K-1)].
dims: List with new dimensions to expand first axis into. The length of
`dims` is typically 2 or larger.
Returns:
a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
"""
inputs_shape = combined_static_and_dynamic_shape(inputs)
expanded_shape = tf.stack(dims + inputs_shape[1:])
# Verify that it is possible to expand the first axis of inputs.
assert_op = tf.assert_equal(
inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
message=('First dimension of `inputs` cannot be expanded into provided `dims`'))
with tf.control_dependencies([assert_op]):
inputs_reshaped = tf.reshape(inputs, expanded_shape)
return inputs_reshaped
def resize_images_and_return_shapes(inputs, image_resizer_fn):
"""Resizes images using the given function and returns their true shapes.
Args:
inputs: a float32 Tensor representing a batch of inputs of shape
[batch_size, height, width, channels].
image_resizer_fn: a function which takes in a single image and outputs
a resized image and its original shape.
Returns:
resized_inputs: The inputs resized according to image_resizer_fn.
true_image_shapes: A integer tensor of shape [batch_size, 3]
representing the height, width and number of channels in inputs.
"""
if inputs.dtype is not tf.float32:
raise ValueError('`resize_images_and_return_shapes` expects a tf.float32 tensor')
# TODO(jonathanhuang): revisit whether to always use batch size as
# the number of parallel iterations vs allow for dynamic batching.
outputs = static_or_dynamic_map_fn(
image_resizer_fn,
elems=inputs,
dtype=[tf.float32, tf.int32])
resized_inputs = outputs[0]
true_image_shapes = outputs[1]
return resized_inputs, true_image_shapes
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/visualize/shape_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for bbox visualization."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/visualize/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors. This
should be used by the decoder to identify keys for the returned tensor_dict
containing input tensors. And it should be used by the model to identify the
tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
original_image_spatial_shape: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_image_confidences: image-level class confidences.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_confidences: box-level class confidences. The shape should be
the same as the shape of groundtruth_classes.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_keypoint_weights: groundtruth weight factor for keypoints.
groundtruth_label_weights: groundtruth label weights.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
is_annotated: whether an image has been labeled or not.
true_image_shapes: true shapes of images in the resized images, as resized
images can be padded with zeros.
multiclass_scores: the label score per class for each box.
context_features: a flattened list of contextual features.
context_feature_length: the fixed length of each feature in
context_features, used for reshaping.
valid_context_size: the valid context size, used in filtering the padded
context features.
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
original_image_spatial_shape = 'original_image_spatial_shape'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_image_confidences = 'groundtruth_image_confidences'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_confidences = 'groundtruth_confidences'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_keypoint_weights = 'groundtruth_keypoint_weights'
groundtruth_label_weights = 'groundtruth_label_weights'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
is_annotated = 'is_annotated'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
context_features = 'context_features'
context_feature_length = 'context_feature_length'
valid_context_size = 'valid_context_size'
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_multiclass_scores: class score distribution (including background)
for each detection box in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
detection_keypoint_scores: contains detection keypoint scores.
num_detections: number of detections in the batch.
raw_detection_boxes: contains decoded detection boxes without Non-Max
suppression.
raw_detection_scores: contains class score logits for raw detection boxes.
detection_anchor_indices: The anchor indices of the detections after NMS.
detection_features: contains extracted features for each detected box
after NMS.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_multiclass_scores = 'detection_multiclass_scores'
detection_features = 'detection_features'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
detection_keypoint_scores = 'detection_keypoint_scores'
num_detections = 'num_detections'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_scores = 'raw_detection_scores'
detection_anchor_indices = 'detection_anchor_indices'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
is_crowd: is_crowd annotation per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
confidences = 'confidences'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
is_crowd = 'is_crowd'
class PredictionFields(object):
"""Naming conventions for standardized prediction outputs.
Attributes:
feature_maps: List of feature maps for prediction.
anchors: Generated anchors.
raw_detection_boxes: Decoded detection boxes without NMS.
raw_detection_feature_map_indices: Feature map indices from which each raw
detection box was produced.
"""
feature_maps = 'feature_maps'
anchors = 'anchors'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_feature_map_indices = 'raw_detection_feature_map_indices'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # 'format' alone would shadow the Python builtin
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/visualize/standard_fields.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and draw the visualization directly
onto it; they do not return a value and instead modify the image in place.
"""
import collections
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.visualize import shape_utils
from nvidia_tao_tf2.cv.efficientdet.visualize import standard_fields as fields
matplotlib.use('Agg') # Set headless-friendly backend.
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.io.gfile.GFile(output_path, 'wb') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4-6]: masks (optional)
[4-6]: keypoints (optional)
[4-6]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be: image - uint8 numpy array
with shape (img_height, img_width, 3). boxes - a numpy array of shape
[N, 4]. classes - a numpy array of shape [N]. scores - a numpy array of
shape [N] or None. -- Optional positional arguments -- instance_masks -
a numpy array of shape [N, image_height, image_width]. keypoints - a
numpy array of shape [N, num_keypoints, 2]. track_ids - a numpy array of
shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if key not in [input_data_fields.original_image, input_data_fields.image_additional_channels]:
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat( # noqa pylint: disable=E1123
[images_with_detections, images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat( # noqa pylint: disable=E1123
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(
image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates, keypoint_edges,
keypoint_edge_color, keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = [im_width * x for x in keypoints_x]
keypoints_y = [im_height * y for y in keypoints_y]
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color,
fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
mask: a uint8 numpy array of shape (img_height, img_width) with values
of either 0 or 1.
color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError(f'The image has spatial dimensions {image.shape[:2]} but the mask has '
                 f'dimensions {mask.shape}')
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
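# A minimal usage sketch (assumed inputs, not part of the original module): the mask
# must be a binary uint8 array with the same spatial size as the image.
#
#   import numpy as np
#   img = np.zeros((64, 64, 3), dtype=np.uint8)
#   msk = np.zeros((64, 64), dtype=np.uint8)
#   msk[16:48, 16:48] = 1
#   draw_mask_on_image_array(img, msk, color='blue', alpha=0.5)  # modifies img in place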
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then this
function assumes that the boxes to be plotted are groundtruth boxes and
plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as normalized
coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = f'{int(100 * scores[i])}%'
else:
display_str = f'{display_str}: {int(100 * scores[i])}%'
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = f'ID {track_ids[i]}'
else:
display_str = f'{display_str}: ID {track_ids[i]}'
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
prime_multiplier = _get_multiplier_for_color_randomness()
box_to_color_map[box] = STANDARD_COLORS[(prime_multiplier * track_ids[i]) %
len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image, box_to_instance_masks_map[box], color=color)
if instance_boundaries is not None:
draw_mask_on_image_array(
image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
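# A minimal usage sketch (hypothetical inputs, not part of the original module), showing
# how detections for a single image are overlaid in place:
#
#   import numpy as np
#   category_index = {1: {'id': 1, 'name': 'person'}}
#   img = np.zeros((480, 640, 3), dtype=np.uint8)
#   boxes = np.array([[0.1, 0.1, 0.6, 0.5]])   # [ymin, xmin, ymax, xmax], normalized
#   classes = np.array([1])
#   scores = np.array([0.9])
#   visualize_boxes_and_labels_on_image_array(
#       img, boxes, classes, scores, category_index,
#       use_normalized_coordinates=True, min_score_thresh=0.5)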
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (
np.arange(cumulative_values.size, dtype=np.float32) /
cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(111)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(111)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
def denormalize_image(image):
"""De-normalize image array.
Args:
image: numpy array [H, W, C]
Return:
output: numpy array [H, W, C]
"""
scale = np.array([0.224, 0.224, 0.224])[None][None]
mean = np.array([0.485, 0.456, 0.406])[None][None]
output = (image * scale + mean) * 255
return output.astype(np.uint8)
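# A minimal sketch of the intended round trip (the normalization step is an assumption;
# the mean/scale constants simply mirror the values used above):
#
#   import numpy as np
#   raw = np.random.randint(0, 255, (32, 32, 3)).astype(np.float32)
#   normalized = (raw / 255. - np.array([0.485, 0.456, 0.406])) / np.array([0.224, 0.224, 0.224])
#   restored = denormalize_image(normalized)  # uint8 array approximately equal to raw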
def visualize_detections(image_path, output_path, detections, labels):
"""Visualize detections."""
image = Image.open(image_path).convert(mode='RGB')
draw = ImageDraw.Draw(image)
line_width = 2
font = ImageFont.load_default()
for d in detections:
color = STANDARD_COLORS[d['class'] % len(STANDARD_COLORS)]
draw.line([(d['xmin'], d['ymin']), (d['xmin'], d['ymax']),
(d['xmax'], d['ymax']), (d['xmax'], d['ymin']),
(d['xmin'], d['ymin'])], width=line_width, fill=color)
label = f"Class {d['class']}"
if d['class'] < len(labels):
label = str(labels[d['class']])
score = d['score']
text = f"{label}: {int(100 * score)}%"
if score < 0:
text = label
text_width, text_height = font.getsize(text)
text_bottom = max(text_height, d['ymin'])
text_left = d['xmin']
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(text_left, text_bottom - text_height - 2 * margin), (text_left + text_width, text_bottom)],
fill=color)
draw.text(
(text_left + margin, text_bottom - text_height - margin),
text, fill='black', font=font)
image.save(output_path)
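# A minimal usage sketch (hypothetical paths and labels, not part of the original module):
#
#   detections = [{'class': 0, 'score': 0.87,
#                  'xmin': 12, 'ymin': 30, 'xmax': 140, 'ymax': 220}]
#   visualize_detections('input.jpg', 'output.jpg', detections, labels=['person'])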
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/visualize/vis_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to access TensorShape values.
The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
"""
def get_dim_as_int(dim):
"""Utility to get v1 or v2 TensorShape dim as an int.
Args:
dim: The TensorShape dimension to get as an int
Returns:
None or an int.
"""
try:
return dim.value
except AttributeError:
return dim
def get_batch_size(tensor_shape):
"""Returns batch size from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the batch size of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[0])
def get_height(tensor_shape):
"""Returns height from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the height of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[1])
def get_width(tensor_shape):
"""Returns width from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the width of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[2])
def get_depth(tensor_shape):
"""Returns depth from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the depth of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[3])
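# A minimal usage sketch (assumed shapes, not part of the original module):
#
#   import tensorflow as tf
#   shape = tf.TensorShape([8, 512, 512, 3])  # [batch_size, height, width, depth]
#   get_batch_size(shape), get_height(shape), get_width(shape), get_depth(shape)
#   # -> (8, 512, 512, 3)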
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/visualize/static_shape.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export EfficientDet model to etlt and TRT engine."""
import logging
import os
import tempfile
import tensorflow as tf
from tensorflow.python.util import deprecation
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.exporter.onnx_exporter import EfficientDetGraphSurgeon
from nvidia_tao_tf2.cv.efficientdet.inferencer import inference
from nvidia_tao_tf2.cv.efficientdet.utils import helper, hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import initialize
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='efficientdet', mode='export')
def run_export(cfg):
"""Launch EfficientDet export."""
# Parse and update hparams
MODE = 'export'
config = hparams_config.get_detection_config(cfg.model.name)
config.update(generate_params_from_cfg(config, cfg, mode=MODE))
params = config.as_dict()
assert cfg.export.onnx_file.endswith('.onnx'), "Exported file must end with .onnx"
output_dir = tempfile.mkdtemp()
tf.keras.backend.set_learning_phase(0)
# Load model from graph json
model = helper.load_model(cfg.export.checkpoint, cfg, MODE, is_qat=cfg.train.qat)
model.summary()
# Get input shape from model
input_shape = list(model.layers[0].input_shape[0])
max_batch_size = 1 if cfg.export.dynamic_batch_size else cfg.export.batch_size
input_shape[0] = max_batch_size
# Build inference model
export_model = inference.InferenceModel(
model, config.image_size, params, max_batch_size,
min_score_thresh=0.001, # a small value
max_boxes_to_draw=cfg.evaluate.max_detections_per_image)
export_model.infer = tf.function(export_model.infer)
tf.saved_model.save(
export_model,
output_dir,
signatures=export_model.infer.get_concrete_function(
tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.uint8)))
# convert to onnx
effdet_gs = EfficientDetGraphSurgeon(
output_dir,
legacy_plugins=False,
dynamic_batch=cfg.export.dynamic_batch_size,
is_qat=cfg.train.qat)
effdet_gs.update_preprocessor('NHWC', input_shape, preprocessor="imagenet")
effdet_gs.update_shapes()
effdet_gs.update_nms(threshold=cfg.export.min_score_thresh)
# save the exported ONNX model
onnx_file = effdet_gs.save(cfg.export.onnx_file)
logger.info("The exported model is saved at: %s", onnx_file)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="export", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig):
"""Wrapper function for EfficientDet export."""
cfg = update_results_dir(cfg, 'export')
initialize(cfg, logger, training=False)
run_export(cfg=cfg)
if __name__ == '__main__':
main()
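# Example invocation (a sketch only; the override keys mirror the cfg fields used above,
# the experiment spec shipped with the repo supplies the remaining defaults, and the
# paths are hypothetical):
#
#   python export.py export.checkpoint=/workspace/model.tlt \
#       export.onnx_file=/workspace/model.onnx results_dir=/workspace/export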
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert raw COCO dataset to TFRecord for object_detection."""
import collections
from collections import Counter
import hashlib
import io
import json
import multiprocessing
import os
import numpy as np
import PIL.Image
from pycocotools import mask
from skimage import measure
import tensorflow as tf
from nvidia_tao_tf2.common.dataset import dataset_util
from nvidia_tao_tf2.common.dataset import label_map_util
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
def create_tf_example(image,
bbox_annotations,
image_dir,
category_index,
include_masks=False,
inspect_mask=True):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys:
[u'license', u'file_name', u'coco_url', u'height', u'width',
u'date_captured', u'flickr_url', u'id']
bbox_annotations:
list of dicts with keys:
[u'segmentation', u'area', u'iscrowd', u'image_id',
u'bbox', u'category_id', u'id']
Notice that bounding box coordinates in the official COCO dataset are
given as [x, y, width, height] tuples using absolute coordinates where
x, y represent the top-left (0-indexed) corner. This function converts
to the format expected by the TensorFlow Object Detection API, which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to the
image size.
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed
by the 'id' field of each category. See the
label_map_util.create_category_index function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
num_annotations_skipped = 0
log_warnings = {}
cat_counter = Counter()
box_oob = [] # out of bound boxes
mask_oob = [] # out of bound masks
for object_annotations in bbox_annotations:
object_annotations_id = object_annotations['id']
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0: # or x + width > image_width or y + height > image_height
num_annotations_skipped += 1
box_oob.append(object_annotations_id)
continue
# correct label errors
left = max(x, 0)
top = max(y, 0)
right = min(left + width, image_width)
bottom = min(top + height, image_height)
if right - left < 1 or bottom - top < 1:
num_annotations_skipped += 1
box_oob.append(object_annotations_id)
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(category_index[category_id]['name'].encode('utf8'))
if str(category_index[category_id]['name']) in cat_counter:
cat_counter[str(category_index[category_id]['name'])] += 1
else:
cat_counter[str(category_index[category_id]['name'])] = 1
area.append(object_annotations['area'])
if include_masks:
if 'segmentation' not in object_annotations:
raise ValueError(
f"segmentation groundtruth is missing in object: {object_annotations_id}.")
# polygon (e.g. [[289.74,443.39,302.29,445.32, ...], [1,2,3,4]])
if isinstance(object_annotations['segmentation'], list):
rles = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
rle = mask.merge(rles)
elif 'counts' in object_annotations['segmentation']:
# e.g. {'counts': [6, 1, 40, 4, 5, 4, 5, 4, 21], 'size': [9, 10]}
if isinstance(object_annotations['segmentation']['counts'], list):
rle = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
else:
rle = object_annotations['segmentation']
else:
raise ValueError('Please check the segmentation format.')
binary_mask = mask.decode(rle)
contours = measure.find_contours(binary_mask, 0.5)
if inspect_mask:
# check if mask is out of bound compared to bbox
min_x, max_x = image_width + 1, -1
min_y, max_y = image_height + 1, -1
for cont in contours:
c = np.array(cont)
min_x = min(min_x, np.amin(c, axis=0)[1])
max_x = max(max_x, np.amax(c, axis=0)[1])
min_y = min(min_y, np.amin(c, axis=0)[0])
max_y = max(max_y, np.amax(c, axis=0)[0])
xxmin, xxmax, yymin, yymax = \
float(x) - 1, float(x + width) + 1, float(y) - 1, float(y + height) + 1
if xxmin > min_x or yymin > min_y or xxmax < max_x or yymax < max_y:
mask_oob.append(object_annotations_id)
# if not object_annotations['iscrowd']:
# binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/class/label':
dataset_util.int64_list_feature(category_ids),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
if mask_oob or box_oob:
log_warnings[image_id] = {}
log_warnings[image_id]['box'] = box_oob
log_warnings[image_id]['mask'] = mask_oob
return key, example, num_annotations_skipped, log_warnings, cat_counter
def _pool_create_tf_example(args):
return create_tf_example(*args)
def _load_object_annotations(object_annotations_file):
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
category_index = label_map_util.create_category_index(
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
tf.compat.v1.logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
tf.compat.v1.logging.info('%d images are missing bboxes.', missing_annotation_count)
return images, img_to_obj_annotation, category_index
def _merge_log(log_a, log_b):
log_ab = log_a.copy()
for k, v in log_b.items():
if k in log_ab:
log_ab[k] += v
else:
log_ab[k] = v
return log_ab
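# For example (a sketch of the behavior): merging {'a': [1]} with {'a': [2], 'b': [3]}
# yields {'a': [1, 2], 'b': [3]}.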
def _create_tf_record_from_coco_annotations(object_annotations_file,
image_dir, output_path, include_masks, num_shards):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
object_annotations_file: JSON file containing bounding box annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: Number of output files to create.
"""
tf.compat.v1.logging.info('writing to output path: %s', output_path)
writers = [
tf.io.TFRecordWriter(
output_path + '-%05d-of-%05d.tfrecord' % # noqa pylint: disable=C0209
(i, num_shards)) for i in range(num_shards)
]
images, img_to_obj_annotation, category_index = (
_load_object_annotations(object_annotations_file))
pool = multiprocessing.Pool() # noqa pylint: disable=R1732
total_num_annotations_skipped = 0
log_total = {}
cat_total = Counter()
for idx, (_, tf_example, num_annotations_skipped, log_warnings, cats) in enumerate(
pool.imap(_pool_create_tf_example, [(
image,
img_to_obj_annotation[image['id']],
image_dir,
category_index,
include_masks) for image in images])):
if idx % 100 == 0:
tf.compat.v1.logging.info('On image %d of %d', idx, len(images))
total_num_annotations_skipped += num_annotations_skipped
log_total = _merge_log(log_total, log_warnings)
cat_total.update(cats)
writers[idx % num_shards].write(tf_example.SerializeToString())
pool.close()
pool.join()
for writer in writers:
writer.close()
tf.compat.v1.logging.info(
'Finished writing, skipped %d annotations.', total_num_annotations_skipped)
return log_total, cat_total
@monitor_status(name="efficientdet", mode="data conversion")
def run_conversion(cfg):
"""Run data conversion."""
# config output files
if not os.path.exists(cfg.results_dir):
os.makedirs(cfg.results_dir, exist_ok=True)
tag = cfg.dataset_convert.tag or os.path.splitext(os.path.basename(cfg.dataset_convert.annotations_file))[0]
output_path = os.path.join(cfg.results_dir, tag)
log_total, cat_total = _create_tf_record_from_coco_annotations(
cfg.dataset_convert.annotations_file,
cfg.dataset_convert.image_dir,
output_path,
cfg.dataset_convert.include_masks,
num_shards=cfg.dataset_convert.num_shards)
if log_total:
with open(os.path.join(cfg.results_dir, f'{tag}_warnings.json'), "w", encoding='utf-8') as f:
json.dump(log_total, f)
status_logging.get_status_logger().categorical = {'num_objects': cat_total}
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="dataset_convert", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Convert COCO format json and images into TFRecords."""
cfg = update_results_dir(cfg, 'dataset_convert')
run_conversion(cfg)
if __name__ == '__main__':
main()
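# Example invocation (a sketch only; the override keys mirror the cfg.dataset_convert
# fields used above and the paths are hypothetical):
#
#   python dataset_convert.py dataset_convert.annotations_file=/data/annotations/train.json \
#       dataset_convert.image_dir=/data/images/train \
#       dataset_convert.num_shards=32 results_dir=/workspace/tfrecords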
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main training script."""
import logging
import os
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.common.mlops.utils import init_mlops
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.dataloader import dataloader, datasource
from nvidia_tao_tf2.cv.efficientdet.model.efficientdet_module import EfficientDetModule
from nvidia_tao_tf2.cv.efficientdet.model import callback_builder
from nvidia_tao_tf2.cv.efficientdet.trainer.efficientdet_trainer import EfficientDetTrainer
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import is_main_process, initialize
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import get_world_size, get_rank
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='efficientdet', mode='training')
def run_experiment(cfg):
"""Run training experiment."""
# Parse and update hparams
config = hparams_config.get_detection_config(cfg.model.name)
config.update(generate_params_from_cfg(config, cfg, mode='train'))
if is_main_process():
init_mlops(cfg, name='efficientdet')
# Set up dataloader
train_sources = datasource.DataSource(
cfg.dataset.train_tfrecords,
cfg.dataset.train_dirs)
train_dl = dataloader.CocoDataset(
train_sources,
is_training=True,
use_fake_data=cfg.dataset.use_fake_data,
max_instances_per_image=config.max_instances_per_image)
train_dataset = train_dl(
config.as_dict(),
batch_size=cfg.train.batch_size)
# eval data
eval_sources = datasource.DataSource(
cfg.dataset.val_tfrecords,
cfg.dataset.val_dirs)
eval_dl = dataloader.CocoDataset(
eval_sources,
is_training=False,
max_instances_per_image=config.max_instances_per_image)
eval_dataset = eval_dl(
config.as_dict(),
batch_size=cfg.evaluate.batch_size)
efficientdet = EfficientDetModule(config)
# set up callbacks
callbacks = callback_builder.get_callbacks(
config,
eval_dataset.shard(get_world_size(), get_rank()).take(config.eval_samples),
efficientdet.steps_per_epoch,
eval_model=efficientdet.eval_model,
initial_epoch=efficientdet.initial_epoch)
trainer = EfficientDetTrainer(
num_epochs=config.num_epochs,
callbacks=callbacks)
trainer.fit(
efficientdet,
train_dataset,
eval_dataset,
verbose=1 if is_main_process() else 0)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="train", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for EfficientDet training."""
cfg = update_results_dir(cfg, 'train')
initialize(cfg, logger, training=True)
run_experiment(cfg=cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the EfficientDet TAO model."""
import logging
import os
import tempfile
import tensorflow as tf
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.common.utils import get_model_file_size, update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.pruner.pruner import EfficientDetPruner
from nvidia_tao_tf2.cv.efficientdet.utils.helper import dump_eval_json, dump_json, encode_eff
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import initialize
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.get_logger().setLevel('ERROR')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='efficientdet', mode='pruning')
def run_pruning(cfg):
"""Prune an encrypted Keras model."""
# Set up EfficientDet pruner
pruner = EfficientDetPruner(cfg)
# Pruning trained model
pruned_model = pruner.prune(
threshold=cfg.prune.threshold,
excluded_layers=list(cfg.prune.excluded_layers))
# Save the encrypted pruned model
tmp_dir = tempfile.mkdtemp()
dump_json(pruned_model, os.path.join(tmp_dir, 'train_graph.json'))
dump_eval_json(tmp_dir, eval_graph='eval_graph.json')
pruned_model.save_weights(os.path.join(tmp_dir, 'prunedckpt'))
pruned_model.summary()
# Convert to EFF
output_path = os.path.join(
cfg.prune.results_dir,
f'model_th={cfg.prune.threshold}_eq={cfg.prune.equalization_criterion}.tlt')
encode_eff(tmp_dir, output_path, cfg.encryption_key, is_pruned=True)
pruning_ratio = pruned_model.count_params() / pruner.model.count_params()
logger.info("Pruning ratio (pruned model / original model): %s", pruning_ratio)
status_logging.get_status_logger().kpi.update(
{'pruning_ratio': float(pruning_ratio),
'param_count': pruned_model.count_params(),
'size': get_model_file_size(output_path)})
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="prune", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for EfficientDet pruning."""
cfg = update_results_dir(cfg, 'prune')
initialize(cfg, logger, training=False)
run_pruning(cfg=cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet standalone inference."""
import logging
import os
import tensorflow as tf
from tensorflow.python.util import deprecation
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.inferencer import inference
from nvidia_tao_tf2.cv.efficientdet.utils import helper, hparams_config, label_utils
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import initialize
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
def get_label_dict(label_txt):
"""Create label dict from txt file."""
with open(label_txt, 'r', encoding='utf-8') as f:
labels = f.readlines()
    return {i + 1: label.strip() for i, label in enumerate(labels)}
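# Illustrative: a label file containing the lines "person" and "car" maps to
# {1: 'person', 2: 'car'}; class ids start from 1.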
def batch_generator(iterable, batch_size=1):
"""Load a list of image paths in batches.
Args:
iterable: a list of image paths
        batch_size: batch size
"""
total_len = len(iterable)
for ndx in range(0, total_len, batch_size):
yield iterable[ndx:min(ndx + batch_size, total_len)]
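# Minimal usage sketch (file names are illustrative):
#   for batch in batch_generator(['0.jpg', '1.jpg', '2.jpg'], batch_size=2):
#       ...  # yields ['0.jpg', '1.jpg'] and then ['2.jpg']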
@monitor_status(name='efficientdet', mode='inference')
def infer_tlt(cfg):
"""Launch EfficientDet TLT model Inference."""
# disable_eager_execution()
tf.autograph.set_verbosity(0)
# Parse and update hparams
MODE = 'infer'
config = hparams_config.get_detection_config(cfg.model.name)
config.update(generate_params_from_cfg(config, cfg, mode=MODE))
params = config.as_dict()
# Parse label map
label_id_mapping = {}
if cfg.inference.label_map:
logger.info("Parsing label map...")
if str(cfg.inference.label_map).endswith('.yaml'):
label_id_mapping = label_utils.get_label_map(cfg.inference.label_map)
else:
label_id_mapping = get_label_dict(cfg.inference.label_map)
# Load model from graph json
logger.info('Loading model from: %s', cfg.inference.checkpoint)
model = helper.load_model(cfg.inference.checkpoint, cfg, MODE, is_qat=cfg.train.qat)
infer_model = inference.InferenceModel(model, config.image_size, params,
cfg.inference.batch_size,
label_id_mapping=label_id_mapping,
min_score_thresh=cfg.inference.min_score_thresh,
max_boxes_to_draw=cfg.inference.max_boxes_to_draw)
imgpath_list = [os.path.join(cfg.inference.image_dir, imgname)
for imgname in sorted(os.listdir(cfg.inference.image_dir))
if os.path.splitext(imgname)[1].lower()
in supported_img_format]
logger.info("Running inference...")
for image_paths in batch_generator(imgpath_list, cfg.inference.batch_size):
infer_model.visualize_detections(
image_paths,
cfg.inference.results_dir,
cfg.inference.dump_label)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="inference", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig):
"""Wrapper function for EfficientDet inference."""
cfg = update_results_dir(cfg, 'inference')
initialize(cfg, logger, training=False)
infer_tlt(cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet standalone evaluation script."""
import logging
import os
from mpi4py import MPI
import tensorflow as tf
from nvidia_tao_tf2.common.decorators import monitor_status
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
import nvidia_tao_tf2.common.logging.logging as status_logging
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
from nvidia_tao_tf2.common.utils import update_results_dir
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.dataloader import dataloader, datasource
from nvidia_tao_tf2.cv.efficientdet.processor.postprocessor import EfficientDetPostprocessor
from nvidia_tao_tf2.cv.efficientdet.utils import coco_metric, label_utils
from nvidia_tao_tf2.cv.efficientdet.utils import helper, hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import (
initialize, is_main_process, get_world_size, get_rank)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
@monitor_status(name='efficientdet', mode='evaluation')
def run_experiment(cfg):
"""Run evaluation."""
MODE = 'eval'
# Parse and update hparams
config = hparams_config.get_detection_config(cfg.model.name)
config.update(generate_params_from_cfg(config, cfg, mode=MODE))
# Set up dataloader
eval_sources = datasource.DataSource(
cfg.dataset.val_tfrecords,
cfg.dataset.val_dirs)
eval_dl = dataloader.CocoDataset(
eval_sources,
is_training=False,
max_instances_per_image=config.max_instances_per_image)
eval_dataset = eval_dl(
config.as_dict(),
batch_size=cfg.evaluate.batch_size)
num_samples = (cfg.evaluate.num_samples + get_world_size() - 1) // get_world_size()
num_samples = (num_samples + cfg.evaluate.batch_size - 1) // cfg.evaluate.batch_size
cfg.evaluate.num_samples = num_samples
eval_dataset = eval_dataset.shard(get_world_size(), get_rank()).take(num_samples)
# Load model from graph json
model = helper.load_model(cfg.evaluate.checkpoint, cfg, MODE, is_qat=cfg.train.qat)
# Set up postprocessor
postpc = EfficientDetPostprocessor(config)
label_map = label_utils.get_label_map(cfg.evaluate.label_map)
evaluator = coco_metric.EvaluationMetric(
filename=cfg.dataset.val_json_file, label_map=label_map)
@tf.function
def eval_model_fn(images, labels):
cls_outputs, box_outputs = model(images, training=False)
detections = postpc.generate_detections(
cls_outputs, box_outputs,
labels['image_scales'],
labels['source_ids'])
def transform_detections(detections):
            # Transform detections from [id, x1, y1, x2, y2, score, class]
            # format to [id, x, y, w, h, score, class].
return tf.stack([
detections[:, :, 0],
detections[:, :, 1],
detections[:, :, 2],
detections[:, :, 3] - detections[:, :, 1],
detections[:, :, 4] - detections[:, :, 2],
detections[:, :, 5],
detections[:, :, 6],
], axis=-1)
tf.numpy_function(
evaluator.update_state,
[labels['groundtruth_data'], transform_detections(detections)], [])
evaluator.reset_states()
# evaluate all images.
pbar = tf.keras.utils.Progbar(num_samples)
for i, (images, labels) in enumerate(eval_dataset):
eval_model_fn(images, labels)
if is_main_process():
pbar.update(i)
# gather detections from all ranks
evaluator.gather()
if is_main_process():
# compute the final eval results.
metrics = evaluator.result()
metric_dict = {}
for i, name in enumerate(evaluator.metric_names):
metric_dict[name] = metrics[i]
if label_map:
print("=============")
print("Per class AP ")
print("=============")
for i, cid in enumerate(sorted(label_map.keys())):
name = f'AP_{label_map[cid]}'
metric_dict[name] = metrics[i + len(evaluator.metric_names)]
print(f'{name}: {metric_dict[name]:.03f}')
for k, v in metric_dict.items():
status_logging.get_status_logger().kpi[k] = float(v)
MPI.COMM_WORLD.Barrier() # noqa pylint: disable=I1101
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="eval", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for EfficientDet evaluation."""
cfg = update_results_dir(cfg, 'evaluate')
initialize(cfg, logger, training=False)
run_experiment(cfg)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for efficientdet."""
import argparse
from nvidia_tao_tf2.cv.efficientdet import scripts
from nvidia_tao_tf2.common.entrypoint.entrypoint import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"efficientdet_tf2",
add_help=True,
description="TAO Toolkit entrypoint for EfficientDet (TF2)"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks,
multigpu_support=['train', 'evaluate'],
task="efficientdet_tf2"
)
if __name__ == '__main__':
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/entrypoint/efficientdet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for classification."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer related utils."""
import logging
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
from nvidia_tao_tf2.cv.efficientdet.model import learning_rate
@tf.keras.utils.register_keras_serializable(package='Custom')
class HvdMovingAverage(MovingAverage):
"""MovingAverage to support Horovod."""
def swap_weights(self):
"""Swap the average and moving weights.
The original function in the parent class assumes a cross replica
        context, which fails for single GPU training. It also fails in the case of
multi-GPU training with Horovod.
"""
self._swap_weights()
def _create_slots(self, var_list):
"""HvdMovingAverage _create_slots summary.
The original function in the parent class, in addition to calling
_create_slots() of the base optimizer, reassigns trainable tensors to
self._average_weights and self._model_weights, which has the effect of
removing non-trainable tensors (e.g., moving means and variances) from EMA.
By overriding it, we simply keep the part that calls _create_slots of the base
optimizer. To make up for the removed part of the code, we call shadow_copy, which
assigns both trainable and non-trainable tensors to self._average_weights and
self._model_weights.
Args:
            var_list: List of model variables to create slots for.
"""
self._optimizer._create_slots(var_list=var_list)
def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):
"""Apply gradients."""
self._optimizer._iterations = self.iterations
result = super().apply_gradients(grads_and_vars, name)
# update EMA weights after the weights are updated
self.update_average(self._optimizer.iterations)
return result
def _resource_apply_dense(self, grad, var):
"""HvdMovingAverage _resource_apply_dense summary.
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
"""HvdMovingAverage _resource_apply_sparse summary.
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse(grad, var, indices)
def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
"""_resource_apply_sparse_duplicate_indices.
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, var, indices)
def get_decay_value(self, step, average_decay):
"""Get decay value for updating average."""
        # Pyarmor doesn't recognize the tf.function decorator, which results in the
        # error: "using a `tf.Tensor` as a Python `bool` is not allowed in Graph execution".
        # Hence we use tf.py_function as a temporary workaround.
if step < self._start_step:
decay = tf.constant(0., tf.float32)
elif self._dynamic_decay:
decay = step - self._start_step
decay = tf.minimum(average_decay, (1. + decay) / (10. + decay))
else:
decay = average_decay
return decay
def update_average(self, step: tf.Tensor):
"""Update average."""
step = tf.cast(step, tf.float32)
average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
decay = tf.py_function(func=self.get_decay_value, inp=[step, average_decay], Tout=average_decay.dtype)
def _apply_moving(v_moving, v_normal):
diff = v_moving - v_normal
v_moving.assign_sub(tf.cast(1. - decay, v_moving.dtype) * diff)
return v_moving
def _update(strategy, v_moving_and_v_normal):
for v_moving, v_normal in v_moving_and_v_normal:
strategy.extended.update(v_moving, _apply_moving, args=(v_normal,))
ctx = tf.distribute.get_replica_context()
return ctx.merge_call(_update, args=(zip(self._average_weights, self._model_weights),))
@classmethod
def from_config(cls, config, custom_objects=None):
"""From config."""
optimizer = tf.keras.optimizers.deserialize(
config.pop('optimizer'),
custom_objects=custom_objects,
)
return cls(optimizer, **config)
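# Note on the EMA update performed by HvdMovingAverage.update_average: each averaged
# weight is nudged toward the current model weight by
#   v_moving <- v_moving - (1 - decay) * (v_moving - v_normal)
# which is the usual exponential moving average
#   v_moving <- decay * v_moving + (1 - decay) * v_normal
# With dynamic decay enabled, decay ramps up as (1 + t) / (10 + t), where t is the
# number of steps past _start_step, and is capped at `average_decay`, so early steps
# track the model weights more closely.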
def get_optimizer(params, steps_per_epoch):
"""Get optimizer.
Args:
params (hparams_config.Config): Hyperparameter config.
steps_per_epoch (int): Number of steps per epoch.
"""
lr = learning_rate.learning_rate_schedule(params, steps_per_epoch)
if params['optimizer'].lower() == 'sgd':
logging.debug('Use SGD optimizer')
optimizer = tf.keras.optimizers.legacy.SGD(
lr, momentum=params['momentum'])
elif params['optimizer'].lower() == 'adam':
logging.debug('Use Adam optimizer')
optimizer = tf.keras.optimizers.legacy.Adam(
lr, beta_1=params['momentum'])
else:
raise ValueError('optimizer should be sgd or adam')
moving_average_decay = params['moving_average_decay']
if moving_average_decay is not None and moving_average_decay > 0.0:
optimizer = HvdMovingAverage(optimizer, average_decay=moving_average_decay, dynamic_decay=True) # noqa pylint: disable=E0110
if params.get('mixed_precision', None):
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
optimizer, initial_scale=params.get('loss_scale', None))
return optimizer
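# Minimal usage sketch (illustrative): `params` is normally the detection hparams
# converted with `.as_dict()`, which already carries the learning-rate, momentum,
# moving-average and mixed-precision keys read above.
#   config = hparams_config.get_detection_config('efficientdet-d0')
#   opt = get_optimizer(config.as_dict(), steps_per_epoch=1000)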
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/optimizer_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
import tensorflow as tf
def batch_norm_class(is_training=True):
"""Choose BN based on training phase."""
if is_training:
# TODO(fsx950223): use SyncBatchNorm after TF bug is fixed (incorrect nccl
# all_reduce). See https://github.com/tensorflow/tensorflow/issues/41980
return tf.keras.layers.BatchNormalization
return tf.keras.layers.BatchNormalization
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/normalization_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet (tf.keras) builder."""
import logging
import numpy as np
import tensorflow as tf
from nvidia_tao_tf2.backbones.efficientnet_tf import EfficientNetB0, EfficientNetB1
from nvidia_tao_tf2.backbones.efficientnet_tf import EfficientNetB2, EfficientNetB3
from nvidia_tao_tf2.backbones.efficientnet_tf import EfficientNetB4, EfficientNetB5
from nvidia_tao_tf2.backbones.resnet_tf import ResNet
logger = logging.getLogger(__name__)
mappings = {
'efficientdet-d0': [
'block1a_project_bn', 'block2b_add', 'block3b_add', 'block5c_add', 'block7a_project_bn'],
'efficientdet-d1': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5d_add', 'block7b_project_bn'],
'efficientdet-d2': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5d_add', 'block7b_project_bn'],
'efficientdet-d3': [
'block1b_project_bn', 'block2c_add', 'block3c_add', 'block5e_add', 'block7b_project_bn'],
'efficientdet-d4': [
'block1b_project_bn', 'block2d_add', 'block3d_add', 'block5f_add', 'block7b_project_bn'],
'efficientdet-d5': [
'block1b_project_bn', 'block2e_add', 'block3e_add', 'block5g_add', 'block7c_project_bn'],
'resdet18': ['stem_activation', 'block_1b_relu', 'block_2b_relu', 'block_3b_relu', 'block_4b_relu'],
'resdet34': ['stem_activation', 'block_1c_relu', 'block_2d_relu', 'block_3f_relu', 'block_4c_relu'],
}
def swish(features, use_native=True, use_hard=False):
"""Computes the Swish activation function.
We provide three alternatives:
    - Native tf.nn.swish, which uses less memory during training than the composable swish.
    - Quantization-friendly hard swish.
- A composable swish, equivalent to tf.nn.swish, but more general for
finetuning and TF-Hub.
Args:
features: A `Tensor` representing preactivation values.
use_native: Whether to use the native swish from tf.nn that uses a custom
gradient to reduce memory usage, or to use customized swish that uses
default TensorFlow gradient computation.
use_hard: Whether to use quantization-friendly hard swish.
Returns:
The activation value.
"""
if use_native and use_hard:
raise ValueError('Cannot specify both use_native and use_hard.')
if use_native:
return tf.nn.swish(features)
if use_hard:
return features * tf.nn.relu6(features + np.float32(3)) * (1. / 6.)
features = tf.convert_to_tensor(features, name='features')
return features * tf.nn.sigmoid(features)
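# Quick sketch of the three variants on a sample tensor (values are illustrative):
#   x = tf.constant([-1.0, 0.0, 1.0])
#   swish(x)                                   # native tf.nn.swish
#   swish(x, use_native=False)                 # composable x * sigmoid(x)
#   swish(x, use_native=False, use_hard=True)  # quantization-friendly hard swish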
def build_model_base(images, model_name='efficientdet-d0',
num_classes=2, freeze_blocks=None, freeze_bn=False,
data_format='channels_last'):
"""Create a base feature network and return the features before pooling.
Args:
images: input images tensor.
model_name: string, the predefined model name.
Returns:
features: base features before pooling.
endpoints: the endpoints for each layer.
Raises:
When model_name specified an undefined model, raises NotImplementedError.
"""
supported_backbones = {
'efficientdet-d0': EfficientNetB0,
'efficientdet-d1': EfficientNetB1,
'efficientdet-d2': EfficientNetB2,
'efficientdet-d3': EfficientNetB3,
'efficientdet-d4': EfficientNetB4,
'efficientdet-d5': EfficientNetB5,
'resdet18': ResNet,
'resdet34': ResNet,
}
    if model_name not in supported_backbones.keys():
        raise ValueError(
            f"{model_name} is not a supported arch. "
            "Please choose from `efficientdet-d0` to `efficientdet-d5`, `resdet18` or `resdet34`.")
model = supported_backbones[model_name](
nlayers=int(model_name[6:]) if 'resdet' in model_name else 0,
add_head=False,
input_tensor=images,
classes=num_classes,
use_pooling=True,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None)
return [model.get_layer(fmap).output for fmap in mappings[model_name]]
def build_backbone(features, config):
"""Builds backbone model.
Args:
features: input tensor.
config: config for backbone, such as is_training_bn and backbone name.
Returns:
A dict from levels to the feature maps from the output of the backbone model
with strides of 8, 16 and 32.
Raises:
ValueError: if backbone_name is not supported.
"""
# build tf efficientnet backbone
u1, u2, u3, u4, u5 = build_model_base(
features, config.model_name,
freeze_blocks=config.freeze_blocks,
freeze_bn=config.freeze_bn,
data_format=config.data_format)
return {0: features, 1: u1, 2: u2, 3: u3, 4: u4, 5: u5}
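# Minimal usage sketch (input size is illustrative): pull the five backbone endpoints
# for an EfficientDet-D0 backbone directly from build_model_base.
#   images = tf.keras.Input(shape=(512, 512, 3))
#   c1, c2, c3, c4, c5 = build_model_base(images, model_name='efficientdet-d0')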
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras implementation of efficientdet."""
import functools
import numpy as np
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.model import model_builder
from nvidia_tao_tf2.cv.efficientdet.model import activation_builder
from nvidia_tao_tf2.cv.efficientdet.model import fpn_configs
from nvidia_tao_tf2.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf2.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils import model_utils
from nvidia_tao_tf2.cv.efficientdet.utils import keras_utils
# pylint: disable=arguments-differ # for keras layers.
class SeparableConvWAR:
"""WAR for tf.keras.layers.SeparableConv2D."""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
name='separable_conv_war',
**kwargs) -> None:
"""Init."""
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=padding,
depth_multiplier=depth_multiplier,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=False,
depthwise_initializer=depthwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
name=name + '_dw',
**kwargs)
self.pw_conv = tf.keras.layers.Conv2D(
filters,
1,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
name=name,
**kwargs)
def __call__(self, inputs):
"""Call."""
x = self.dw_conv(inputs)
x = self.pw_conv(x)
return x
class FNode:
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
separable_conv,
act_type,
weight_method,
data_format,
name='fnode'):
"""Init Fnode."""
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.separable_conv = separable_conv
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.resample_layers = []
self.name = name
for i, input_offset in enumerate(self.inputs_offsets):
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
data_format=self.data_format,
name=self.name + f'resample_{i}_{input_offset}'))
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.separable_conv,
self.fpn_num_filters,
self.act_type,
self.data_format,
name=f'{self.name}_op_after_combine_{feat_level}')
self.fuse_layer = WeightedFusion(
inputs_offsets=self.inputs_offsets,
name=f'fusion_{self.name}')
def __call__(self, feats, training):
"""Call."""
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_layer(nodes)
new_node = self.op_after_combine(new_node, training)
return feats + [new_node]
class OpAfterCombine:
"""Operation after combining input features during feature fusion."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
separable_conv,
fpn_num_filters,
act_type,
data_format,
name='op_after_combine'):
"""Init OpAfterCombine."""
self.conv_bn_act_pattern = conv_bn_act_pattern
self.separable_conv = separable_conv
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.is_training_bn = is_training_bn
if self.separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1)
# conv2d_layer = functools.partial(
# SeparableConvWAR, depth_multiplier=1)
else:
conv2d_layer = tf.keras.layers.Conv2D
self.conv_op = conv2d_layer(
filters=fpn_num_filters,
kernel_size=(3, 3),
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name=name + '_conv')
self.bn = keras_utils.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name=name + '_bn')
def __call__(self, new_node, training):
"""Call."""
if not self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
return new_node
class ResampleFeatureMap:
"""Resample feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
"""Init ResampleFeatureMap."""
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
self.conv2d = tf.keras.layers.Conv2D(
self.target_num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name=name + '_conv2d')
self.bn = keras_utils.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name=name + '_bn')
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pool the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == "max":
model_class = tf.keras.layers.MaxPooling2D
elif self.pooling_type == "avg":
model_class = tf.keras.layers.AveragePooling2D
else:
raise NotImplementedError(f"Unsupported pooling type {self.pooling_type}")
return model_class(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def __call__(self, feat, training, all_feats):
"""Call."""
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = ImageResizeLayer(
target_height, target_width, self.data_format)(feat)
else:
raise ValueError(
f'Incompatible Resampling : feat shape {height}x{width} '
f'target_shape: {target_height}x{target_width}'
)
return feat
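# Worked example for ResampleFeatureMap._pool2d (illustrative sizes): downsampling a
# 64x64 map to 32x32 gives stride sizes (64 - 1) // 32 + 1 = 2, i.e. a 3x3 pool with
# stride 2 and SAME padding.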
class ClassNet:
"""Object class prediction network."""
def __init__(self,
num_classes=90,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='class_net',
**kwargs):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
            data_format: string of 'channels_first' or 'channels_last'.
            name: the name of this layer.
**kwargs: other parameters.
"""
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
# SeparableConvWAR,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling())
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
for i in range(self.repeats):
# If using SeparableConv2D
self.conv_ops.append(
conv2d_layer(
self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding='same',
name=f'class-{i}'
)
)
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
keras_utils.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name=f'class-{i}-bn-{level}',
)
)
self.bns.append(bn_per_level)
self.classes = conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='class-predict')
def __call__(self, inputs, training, **kwargs):
"""Call ClassNet."""
class_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
class_outputs.append(self.classes(image))
return class_outputs
class BoxNet:
"""Box regression network."""
def __init__(self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='box_net',
**kwargs):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
            data_format: string of 'channels_first' or 'channels_last'.
name: Name of the layer.
**kwargs: other parameters.
"""
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D( # SeparableConvWAR(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name=f'box-{i}'))
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name=f'box-{i}'))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
keras_utils.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name=f'box-{i}-bn-{level}'
)
)
self.bns.append(bn_per_level)
if self.separable_conv:
self.boxes = tf.keras.layers.SeparableConv2D( # SeparableConvWAR(
filters=4 * self.num_anchors,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
else:
self.boxes = tf.keras.layers.Conv2D(
filters=4 * self.num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
def __call__(self, inputs, training):
"""Call boxnet."""
box_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
box_outputs.append(self.boxes(image))
return box_outputs
class SegmentationHead:
"""Keras layer for semantic segmentation head."""
def __init__(self,
num_classes,
num_filters,
min_level,
max_level,
data_format,
is_training_bn,
act_type,
**kwargs):
"""Initialize SegmentationHead.
Args:
num_classes: number of classes.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
            data_format: string of 'channels_first' or 'channels_last'.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
**kwargs: other parameters.
"""
self.act_type = act_type
self.con2d_ts = []
self.con2d_t_bns = []
for _ in range(max_level - min_level):
self.con2d_ts.append(
tf.keras.layers.Conv2DTranspose(
num_filters,
3,
strides=2,
padding='same',
data_format=data_format,
use_bias=False))
self.con2d_t_bns.append(
keras_utils.build_batch_norm(
is_training_bn=is_training_bn,
data_format=data_format,
name='bn'))
self.head_transpose = tf.keras.layers.Conv2DTranspose(
num_classes, 3, strides=2, padding='same')
def __call__(self, feats, training):
"""Call."""
x = feats[-1]
skips = list(reversed(feats[:-1]))
for con2d_t, con2d_t_bn, skip in zip(self.con2d_ts, self.con2d_t_bns, skips):
x = con2d_t(x)
x = con2d_t_bn(x, training)
x = activation_builder.activation_fn(x, self.act_type)
x = tf.concat([x, skip], axis=-1)
# This is the last layer of the model
return self.head_transpose(x) # 64x64 -> 128x128
class FPNCells:
"""FPN cells."""
def __init__(self, config, name='fpn'):
"""Init FPNCells."""
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.cells = [
FPNCell(self.config, name=name + f'_cell_{rep}')
for rep in range(self.config.fpn_cell_repeats)
]
def __call__(self, feats, training):
"""Call."""
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.config.min_level
max_level = self.config.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config.nodes)):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell:
"""A single FPN cell."""
def __init__(self, config, name='fpn_cell'):
"""Init FPNCell."""
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config.nodes):
fnode = FNode(
fnode_cfg['feat_level'] - self.config.min_level,
fnode_cfg['inputs_offsets'],
config.fpn_num_filters,
config.apply_bn_for_resampling,
config.is_training_bn,
config.conv_after_downsample,
config.conv_bn_act_pattern,
config.separable_conv,
config.act_type,
weight_method=self.fpn_config.weight_method,
data_format=config.data_format,
name=name + f'_fnode{i}')
self.fnodes.append(fnode)
def __call__(self, feats, training):
"""Call."""
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
def efficientdet(input_shape, inputs=None, training=True, model_name=None, config=None):
"""Build EfficienDet model graph."""
config = config or hparams_config.get_efficientdet_config(model_name)
if inputs is None:
inputs = tf.keras.Input(shape=input_shape)
else:
inputs = tf.keras.Input(tensor=inputs, shape=input_shape)
# Feature network.
resample_layers = [] # additional resampling layers.
for level in range(6, config.max_level + 1):
# Adds a coarser level by downsampling the last feature map.
resample_layers.append(
ResampleFeatureMap(
feat_level=(level - config.min_level),
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
is_training_bn=config.is_training_bn,
conv_after_downsample=config.conv_after_downsample,
data_format=config.data_format,
name=f'resample_p{level}',
))
fpn_cells = FPNCells(config)
# class/box output prediction network.
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
for head in config.heads:
if head == 'object_detection':
class_net = ClassNet(
num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
box_net = BoxNet(
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
# call backbone network.
all_feats = model_builder.build_backbone(inputs, config)
feats = [all_feats[k] for k in sorted(all_feats.keys())][config.min_level:config.max_level + 1]
# feats = all_feats[config.min_level:config.max_level + 1]
# Build additional input features that are not from backbone.
for resample_layer in resample_layers:
feats.append(resample_layer(feats[-1], training, None))
# call feature network.
fpn_feats = fpn_cells(feats, training)
# call class/box/seg output network.
outputs = []
if 'object_detection' in config.heads:
class_outputs = class_net(fpn_feats, training)
box_outputs = box_net(fpn_feats, training)
outputs.extend([class_outputs, box_outputs])
final_model = tf.keras.Model(inputs=inputs, outputs=outputs, name=config.name)
return final_model
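# Minimal usage sketch (input size is illustrative): build the detection graph and
# get per-level class/box outputs.
#   model = efficientdet((512, 512, 3), model_name='efficientdet-d0', training=False)
#   class_outputs, box_outputs = model(tf.zeros((1, 512, 512, 3)), training=False)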
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/efficientdet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BiFPN/QuFPN and other FPN configs.
BiFPN is presented in the EfficientDet paper.
QuFPN is proposed in https://github.com/google/automl/pull/580
"""
import itertools
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
def bifpn_config(min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1] # noqa pylint: disable=E731,C3001
level_all_ids = lambda level: node_ids[level] # noqa pylint: disable=E731,C3001
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
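# Quick check (illustrative): for min_level=3 and max_level=7 the first generated
# top-down node matches the P6' entry in the comment above.
#   cfg = bifpn_config(3, 7, 'fastattn')
#   cfg.nodes[0]  # {'feat_level': 6, 'inputs_offsets': [3, 4]}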
def qufpn_config(min_level, max_level, weight_method=None):
"""A dynamic quad fpn config that can adapt to different min/max levels."""
# It extends the idea of BiFPN, and has four paths:
# (up_down -> bottom_up) + (bottom_up -> up_down).
# See test for an example for level 2 and 7.
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
p.quad_method = 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1] # noqa pylint: disable=E731,C3001
level_all_ids = lambda level: node_ids[level] # noqa pylint: disable=E731,C3001
level_first_id = lambda level: node_ids[level][0] # noqa pylint: disable=E731,C3001
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path 1.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(min_level + 1, max_level):
# bottom-up path 2.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
i = max_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(min_level + 1, max_level + 1, 1):
# bottom-up path 3.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [
level_first_id(i),
level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)
],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(max_level - 1, min_level, -1):
# top-down path 4.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] + [level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
i = min_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(max_level, min_level - 1, -1):
# quad-add path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
'weight_method': p.quad_method
})
node_ids[i].append(next(id_cnt))
return p
def get_fpn_config(fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = 'bifpn'
name_to_config = {
'bifpn': bifpn_config(min_level, max_level, weight_method),
'qufpn': qufpn_config(min_level, max_level, weight_method),
# legacy only: to be deprecated.
'bifpn_dyn': bifpn_config(min_level, max_level, weight_method),
}
return name_to_config[fpn_name]
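# --- Illustrative usage sketch (added for this write-up; not part of the original TAO source) ---
# Prints the fusion nodes generated for the standard EfficientDet P3-P7 levels,
# which should line up with the node-id walkthrough in the bifpn_config comment above.
# The level range and weight method below are example values; the __main__ guard
# keeps module imports unaffected.
if __name__ == '__main__':
    example_config = get_fpn_config('bifpn', min_level=3, max_level=7, weight_method='fastattn')
    for example_node in example_config.nodes:
        print(example_node['feat_level'], example_node['inputs_offsets'])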
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/fpn_configs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate related utils."""
import math
import logging
from typing import Any, Mapping
import tensorflow as tf
logger = logging.getLogger(__name__)
@tf.keras.utils.register_keras_serializable(package='Custom')
class CosineLrSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Cosine learning rate schedule."""
def __init__(self, base_lr: float, lr_warmup_init: float,
lr_warmup_step: int, total_steps: int):
"""Build a CosineLrSchedule.
Args:
base_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
total_steps: `int`, Total train steps.
"""
super().__init__()
logger.debug('LR schedule method: cosine')
self.base_lr = base_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
        # Keep total_steps so get_config() can fully reconstruct the schedule.
        self.total_steps = total_steps
        self.decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
def __call__(self, step):
"""Call."""
linear_warmup = (
self.lr_warmup_init +
(tf.cast(step, dtype=tf.float32) / self.lr_warmup_step *
(self.base_lr - self.lr_warmup_init)))
cosine_lr = 0.5 * self.base_lr * (
1 + tf.cos(math.pi * (tf.cast(step, tf.float32) - self.lr_warmup_step) / self.decay_steps))
return tf.where(step < self.lr_warmup_step, linear_warmup, cosine_lr)
def get_config(self) -> Mapping[str, Any]:
"""Get config."""
        return {
            "base_lr": self.base_lr,
            "lr_warmup_init": self.lr_warmup_init,
            "lr_warmup_step": self.lr_warmup_step,
            "total_steps": self.total_steps,
        }
@tf.keras.utils.register_keras_serializable(package='Custom')
class SoftStartAnnealingLearningRateScheduler(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Learning rate scheduler implementation.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following 3 phases:
    Phase 1: 0.0 <= progress < soft_start:
             Starting from lr_warmup_init, exponentially increase the learning rate to base_lr.
    Phase 2: soft_start <= progress < annealing_start:
             Maintain the learning rate at base_lr.
    Phase 3: annealing_start <= progress <= 1.0:
             Starting from base_lr, exponentially decay the learning rate to lr_warmup_init.
    Args:
        base_lr: Maximum learning rate.
        lr_warmup_init: Minimum (initial) learning rate used at the start of the soft start and as the annealing floor.
        soft_start: The progress at which the learning rate reaches base_lr when starting from lr_warmup_init.
        annealing_start: The progress at which the learning rate starts to drop from base_lr to lr_warmup_init.
        total_steps: Total number of iterations in the experiment.
"""
def __init__(self, base_lr, lr_warmup_init, soft_start,
annealing_start, total_steps):
"""__init__ method."""
super().__init__()
logger.debug('LR schedule method: SoftStartAnnealing')
        if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be in the range [0.0, 1.0].')
        if not 0.0 <= annealing_start <= 1.0:
            raise ValueError('The annealing_start variable should be in the range [0.0, 1.0].')
if not soft_start < annealing_start:
raise ValueError('Variable soft_start should be less than annealing_start.')
self.base_lr = base_lr
self.soft_start = soft_start # Increase to lr from min_lr until this point.
self.annealing_start = annealing_start # Start annealing to min_lr at this point.
self.total_steps = total_steps
self.lr_warmup_init = lr_warmup_init
def __call__(self, step):
"""Compute learning rate according to progress to reach total_steps."""
progress = step / self.total_steps
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if self.annealing_start < 1.0:
annealing = (1.0 - progress) / (1.0 - self.annealing_start)
else:
annealing = 1.0
# t = soft_start if progress < self.soft_start else 1.0
t = tf.where(progress < self.soft_start, soft_start, 1.0)
# t = annealing if progress > self.annealing_start else t
t = tf.where(progress > self.annealing_start, annealing, t)
t = tf.cast(t, dtype=tf.float32)
lr = tf.math.exp(tf.math.log(self.lr_warmup_init) +
t * (tf.math.log(self.base_lr) - tf.math.log(self.lr_warmup_init)))
return lr
def get_config(self):
"""Config."""
        return {
            "base_lr": self.base_lr,
            "lr_warmup_init": self.lr_warmup_init,
            "soft_start": self.soft_start,
            "annealing_start": self.annealing_start,
            "total_steps": self.total_steps,
        }
def learning_rate_schedule(params, steps_per_epoch):
"""Learning rate schedule based on global step.
Args:
params (TrainConfig): train config loaded by Hydra.
steps_per_epoch (int): Number of steps per epoch.
"""
supported_schedules = ['cosine', 'soft_anneal']
lr_warmup_step = int(params.lr_warmup_epoch * steps_per_epoch)
total_steps = int(params.num_epochs * steps_per_epoch)
lr_decay_method = str(params.lr_decay_method)
if lr_decay_method == 'cosine':
return CosineLrSchedule(params.learning_rate,
params.lr_warmup_init, lr_warmup_step,
total_steps)
if lr_decay_method == 'soft_anneal':
return SoftStartAnnealingLearningRateScheduler(
params.learning_rate,
params.lr_warmup_init,
params.lr_warmup_epoch / params.num_epochs,
params.lr_annealing_epoch / params.num_epochs,
total_steps)
    raise ValueError(
        f'unknown lr_decay_method: {lr_decay_method}. Choose from {supported_schedules}')
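# --- Illustrative usage sketch (added for this write-up; not part of the original TAO source) ---
# Queries both schedules at a few steps and attaches one to a Keras optimizer.
# Every numeric value below is a hypothetical example, not a TAO default.
if __name__ == '__main__':
    cosine = CosineLrSchedule(base_lr=0.08, lr_warmup_init=0.001,
                              lr_warmup_step=500, total_steps=10000)
    annealed = SoftStartAnnealingLearningRateScheduler(
        base_lr=0.08, lr_warmup_init=0.001, soft_start=0.1,
        annealing_start=0.7, total_steps=10000)
    for example_step in (0, 500, 5000, 10000):
        print(example_step,
              float(cosine(tf.constant(example_step, tf.float32))),
              float(annealed(tf.constant(example_step, tf.float32))))
    example_optimizer = tf.keras.optimizers.SGD(learning_rate=cosine, momentum=0.9)
    print('SGD optimizer built with', type(cosine).__name__)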
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/learning_rate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the components and model architecture for EfficientDet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main training script."""
import logging
import os
import re
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow_quantization.custom_qdq_cases import EfficientNetQDQCase
from tensorflow_quantization.quantize import quantize_model
from nvidia_tao_tf2.blocks.module import TAOModule
from nvidia_tao_tf2.cv.efficientdet.losses import losses
from nvidia_tao_tf2.cv.efficientdet.model.efficientdet import efficientdet
from nvidia_tao_tf2.cv.efficientdet.model import optimizer_builder
from nvidia_tao_tf2.cv.efficientdet.utils import keras_utils
from nvidia_tao_tf2.cv.efficientdet.utils.helper import decode_eff, dump_json, load_model, load_json_model
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import is_main_process, get_world_size
logger = logging.getLogger(__name__)
class EfficientDetModule(TAOModule):
"""EfficientDet Module."""
def __init__(self, hparams):
"""Init."""
self.hparams = hparams
self.steps_per_epoch = (
hparams.num_examples_per_epoch +
(hparams.train_batch_size * get_world_size()) - 1) // \
(hparams.train_batch_size * get_world_size())
num_samples = (hparams.eval_samples + get_world_size() - 1) // get_world_size()
self.num_samples = (num_samples + hparams.eval_batch_size - 1) // hparams.eval_batch_size
self.hparams.eval_samples = self.num_samples
self.resume_ckpt_path = os.path.join(hparams.results_dir, f'{hparams.name}.resume')
self.resume = os.path.exists(self.resume_ckpt_path)
self.model, self.eval_model = self._build_models(hparams)
self._load_pretrained_weights(hparams)
self.configure_optimizers(hparams, self.steps_per_epoch)
self.configure_losses(hparams)
if hparams.qat:
logger.info("QAT enabled.")
self._quantize_models()
if is_main_process():
self.model.summary()
self.compile()
self.initial_epoch = self._resume(hparams, self.steps_per_epoch)
def _quantize_models(self):
"""Quantize models."""
self.model = quantize_model(self.model, custom_qdq_cases=[EfficientNetQDQCase()])
self.eval_model = quantize_model(self.eval_model, custom_qdq_cases=[EfficientNetQDQCase()])
def _build_models(self, hparams):
"""Build train/eval unpruned/pruned models."""
if not hparams.pruned_model_path:
if is_main_process():
logger.info("Building unpruned graph...")
input_shape = list(hparams.image_size) + [3] \
if hparams.data_format == 'channels_last' else [3] + list(hparams.image_size)
original_learning_phase = tf.keras.backend.learning_phase()
model = efficientdet(input_shape, training=True, config=hparams)
tf.keras.backend.set_learning_phase(0)
eval_model = efficientdet(input_shape, training=False, config=hparams)
tf.keras.backend.set_learning_phase(original_learning_phase)
else:
if is_main_process():
logger.info("Loading pruned graph...")
original_learning_phase = tf.keras.backend.learning_phase()
model = load_model(hparams.pruned_model_path, hparams, mode='train')
tf.keras.backend.set_learning_phase(0)
eval_model = load_model(hparams.pruned_model_path, hparams, mode='eval')
tf.keras.backend.set_learning_phase(original_learning_phase)
# save nonQAT nonAMP graph in results_dir
dump_json(model, os.path.join(hparams.results_dir, 'train_graph.json'))
dump_json(eval_model, os.path.join(hparams.results_dir, 'eval_graph.json'))
return model, eval_model
def _resume(self, hparams, steps_per_epoch):
"""Resume from checkpoint."""
if self.resume:
ckpt_path, _ = decode_eff(self.resume_ckpt_path, hparams.encryption_key)
train_from_epoch = keras_utils.restore_ckpt(
self.model,
ckpt_path,
hparams.moving_average_decay,
steps_per_epoch=steps_per_epoch,
expect_partial=True)
return train_from_epoch
return self.hparams.init_epoch
def _load_pretrained_weights(self, hparams):
"""Load pretrained weights."""
if is_main_process() and not self.resume:
if str(hparams.checkpoint).endswith(".tlt"):
ckpt_path, ckpt_name = decode_eff(
str(hparams.checkpoint), hparams.encryption_key)
else:
ckpt_path = hparams.checkpoint
if ckpt_path:
if not hparams.pruned_model_path:
logger.info("Loading pretrained weight...")
if 'train_graph.json' in os.listdir(ckpt_path):
logger.info("Loading EfficientDet model...")
pretrained_model = load_json_model(
os.path.join(ckpt_path, 'train_graph.json'))
keras_utils.restore_ckpt(
pretrained_model,
os.path.join(ckpt_path, ckpt_name),
hparams.moving_average_decay,
steps_per_epoch=0,
expect_partial=True)
else:
logger.info("Loading EfficientNet backbone...")
pretrained_model = tf.keras.models.load_model(ckpt_path)
for layer in pretrained_model.layers[1:]:
# The layer must match up to prediction layers.
l_return = None
try:
l_return = self.model.get_layer(layer.name)
except ValueError:
# Some layers are not there
logger.info("Skipping %s, as it does not exist in the training model.", layer.name)
if l_return is not None:
try:
l_return.set_weights(layer.get_weights())
except ValueError:
logger.info("Skipping %s, due to shape mismatch.", layer.name)
def configure_losses(self, hparams, loss=None):
"""Configure losses."""
loss = loss or {}
focal_loss = losses.StableFocalLoss(
hparams.alpha,
hparams.gamma,
label_smoothing=hparams.label_smoothing,
reduction=tf.keras.losses.Reduction.NONE)
box_loss = losses.BoxLoss(
hparams.delta, # TODO(@yuw): add to default config
reduction=tf.keras.losses.Reduction.NONE)
box_iou_loss = losses.BoxIouLoss(
hparams.iou_loss_type,
hparams.min_level,
hparams.max_level,
hparams.num_scales,
hparams.aspect_ratios,
hparams.anchor_scale,
hparams.image_size,
reduction=tf.keras.losses.Reduction.NONE)
self.losses = {
'box_loss': box_loss,
'box_iou_loss': box_iou_loss,
'class_loss': focal_loss,
**loss
}
def configure_optimizers(self, hparams, steps_per_epoch):
"""Configure optimizers."""
self.optimizers = optimizer_builder.get_optimizer(
hparams, steps_per_epoch)
def compile(self):
"""Compile model."""
self.model.compile(
optimizer=self.optimizers,
loss=self.losses)
self.model.train_step = self.train_step
self.model.test_step = self.test_step
def train_step(self, data):
"""Train step.
Args:
data: Tuple of (images, labels). Image tensor with shape [batch_size,
            height, width, 3]. The height and width are fixed and equal. Input labels
in a dictionary. The labels include class targets and box targets which
are dense label maps. The labels are generated from get_input_fn
function in data/dataloader.py.
Returns:
          A dict recording the computed loss values.
"""
images, labels = data
with tf.GradientTape() as tape:
if len(self.hparams.heads) == 2:
cls_outputs, box_outputs, seg_outputs = self.model(images, training=True)
elif 'object_detection' in self.hparams.heads:
cls_outputs, box_outputs = self.model(images, training=True)
elif 'segmentation' in self.hparams.heads:
seg_outputs, = self.model(images, training=True)
raise ValueError("`segmentation` head is disabled. Please set `object_detection`.")
total_loss = 0
loss_vals = {}
if 'object_detection' in self.hparams.heads:
det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
loss_vals)
total_loss += det_loss
if 'segmentation' in self.hparams.heads:
seg_loss_layer = self.model.loss['seg_loss']
seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
total_loss += seg_loss
loss_vals['seg_loss'] = seg_loss
reg_l2_loss = self._reg_l2_loss(self.hparams.l2_weight_decay) if self.hparams.l2_weight_decay else 0
reg_l1_loss = self._reg_l1_loss(self.hparams.l1_weight_decay) if self.hparams.l1_weight_decay else 0
loss_vals['reg_l2_loss'] = reg_l2_loss
loss_vals['reg_l1_loss'] = reg_l1_loss
total_loss += (reg_l2_loss + reg_l1_loss)
if isinstance(self.model.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = self.model.optimizer.get_scaled_loss(total_loss)
optimizer = self.model.optimizer._optimizer
else:
scaled_loss = total_loss
optimizer = self.model.optimizer
compress = keras_utils.get_mixed_precision_policy().compute_dtype == 'float16'
tape = hvd.DistributedGradientTape(
tape, compression=hvd.Compression.fp16 if compress else hvd.Compression.none)
loss_vals['loss'] = total_loss
loss_vals['learning_rate'] = optimizer.learning_rate(optimizer.iterations)
trainable_vars = self._freeze_vars()
scaled_gradients = tape.gradient(scaled_loss, trainable_vars)
if isinstance(self.model.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
gradients = self.model.optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = scaled_gradients
if self.hparams.clip_gradients_norm > 0:
clip_norm = abs(self.hparams.clip_gradients_norm)
gradients = [
tf.clip_by_norm(g, clip_norm) if g is not None else None
for g in gradients
]
gradients, _ = tf.clip_by_global_norm(gradients, clip_norm)
loss_vals['gradient_norm'] = tf.linalg.global_norm(gradients)
# TODO(@yuw)
# grads_and_vars = []
# # Special treatment for biases (beta is named as bias in reference model)
# for grad, var in zip(gradients, trainable_vars):
# if grad is not None and any([pattern in var.name for pattern in ["bias", "beta"]]):
# grad = 2.0 * grad
# grads_and_vars.append((grad, var))
# self.model.optimizer.apply_gradients(grads_and_vars)
self.model.optimizer.apply_gradients(zip(gradients, trainable_vars))
return loss_vals
def test_step(self, data):
"""Test step.
Args:
data: Tuple of (images, labels). Image tensor with shape [batch_size,
            height, width, 3]. The height and width are fixed and equal. Input labels
in a dictionary. The labels include class targets and box targets which
are dense label maps. The labels are generated from get_input_fn
function in data/dataloader.py.
Returns:
          A dict recording the computed loss values.
"""
images, labels = data
if len(self.hparams.heads) == 2:
cls_outputs, box_outputs, seg_outputs = self.model(images, training=False)
elif 'object_detection' in self.hparams.heads:
cls_outputs, box_outputs = self.model(images, training=False)
elif 'segmentation' in self.hparams.heads:
seg_outputs, = self.model(images, training=False)
reg_l2loss = self._reg_l2_loss(self.hparams.l2_weight_decay) if self.hparams.l2_weight_decay else 0
reg_l1loss = self._reg_l1_loss(self.hparams.l1_weight_decay) if self.hparams.l1_weight_decay else 0
total_loss = reg_l2loss + reg_l1loss
loss_vals = {}
if 'object_detection' in self.hparams.heads:
det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
loss_vals)
total_loss += det_loss
if 'segmentation' in self.hparams.heads:
seg_loss_layer = self.model.loss['seg_loss']
seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
total_loss += seg_loss
loss_vals['seg_loss'] = seg_loss
loss_vals['loss'] = total_loss
return loss_vals
def _freeze_vars(self):
"""Get trainable variables."""
if self.hparams.var_freeze_expr:
return [
v for v in self.model.trainable_variables
if not re.match(self.hparams.var_freeze_expr, v.name)
]
return self.model.trainable_variables
def _reg_l2_loss(self, weight_decay, regex=r'.*(kernel|weight):0$'):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in self._freeze_vars()
if var_match.match(v.name)
])
    def _reg_l1_loss(self, weight_decay, regex=r'.*(kernel|weight):0$'):
        """Return regularization l1 loss."""
        var_match = re.compile(regex)
        # tf.contrib is not available in TF2; apply the L1 regularizer directly
        # and sum the per-variable penalties (equivalent to the old
        # tf.contrib.layers.apply_regularization call).
        l1_regularizer = tf.keras.regularizers.l1(weight_decay / 2.0)
        return tf.add_n([
            l1_regularizer(v) for v in self._freeze_vars()
            if var_match.match(v.name)])
def _detection_loss(self, cls_outputs, box_outputs, labels, loss_vals):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
loss_vals: A dict of loss values.
        Returns:
            total_loss: a float scalar tensor representing the total detection loss,
              summed over the class and box losses from all levels. The individual
              cls_loss, box_loss and box_iou_loss components are recorded in loss_vals.
"""
# convert to float32 for loss computing.
cls_outputs = [tf.cast(i, tf.float32) for i in cls_outputs]
box_outputs = [tf.cast(i, tf.float32) for i in box_outputs]
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = range(len(cls_outputs))
cls_losses = []
box_losses = []
for level in levels:
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(
labels[f'cls_targets_{level + 3}'],
self.hparams.num_classes)
if self.hparams.data_format == 'channels_first':
targets_shape = tf.shape(cls_targets_at_level)
bs = targets_shape[0]
width = targets_shape[2]
height = targets_shape[3]
cls_targets_at_level = tf.reshape(
cls_targets_at_level,
[bs, -1, width, height])
else:
targets_shape = tf.shape(cls_targets_at_level)
bs = targets_shape[0]
width = targets_shape[1]
height = targets_shape[2]
cls_targets_at_level = tf.reshape(
cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels[f'box_targets_{level + 3}']
class_loss_layer = self.model.loss.get('class_loss', None)
if class_loss_layer:
cls_loss = class_loss_layer([num_positives_sum, cls_targets_at_level],
cls_outputs[level])
if self.hparams.data_format == 'channels_first':
cls_loss = tf.reshape(
cls_loss, [bs, -1, width, height, self.hparams.num_classes])
else:
cls_loss = tf.reshape(
cls_loss, [bs, width, height, -1, self.hparams.num_classes])
cls_loss *= tf.cast(
tf.expand_dims(
tf.not_equal(labels[f'cls_targets_{level + 3}'], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
if self.hparams.box_loss_weight and self.model.loss.get('box_loss', None):
box_loss_layer = self.model.loss['box_loss']
box_losses.append(
box_loss_layer(
[num_positives_sum, box_targets_at_level],
box_outputs[level]))
if self.hparams.iou_loss_type:
box_outputs = tf.concat([tf.reshape(v, [-1, 4]) for v in box_outputs], axis=0) # noqa pylint: disable=E1123
box_targets = tf.concat([ # noqa pylint: disable=E1123
tf.reshape(labels[f'box_targets_{level + 3}'], [-1, 4])
for level in levels], axis=0)
box_iou_loss_layer = self.model.loss['box_iou_loss']
box_iou_loss = box_iou_loss_layer([num_positives_sum, box_targets],
box_outputs)
loss_vals['box_iou_loss'] = box_iou_loss
else:
box_iou_loss = 0
cls_loss = tf.add_n(cls_losses) if cls_losses else 0
box_loss = tf.add_n(box_losses) if box_losses else 0
total_loss = (
cls_loss + self.hparams.box_loss_weight * box_loss +
self.hparams.iou_loss_weight * box_iou_loss)
loss_vals['det_loss'] = total_loss
loss_vals['cls_loss'] = cls_loss
loss_vals['box_loss'] = box_loss
return total_loss
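# --- Illustrative sketch (added for this write-up; not part of the original TAO source) ---
# Shows which variable names the weight-decay regex used by _reg_l2_loss and
# _reg_l1_loss matches: convolution kernels and weights are regularized, while
# batch-norm parameters and biases are left out. The variable names below are
# hypothetical examples.
if __name__ == '__main__':
    example_pattern = re.compile(r'.*(kernel|weight):0$')
    for example_name in ('block1a/conv2d/kernel:0',
                         'block1a/bn/gamma:0',
                         'box_net/box-predict/bias:0'):
        print(example_name, '->', bool(example_pattern.match(example_name)))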
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/efficientdet_module.py |
"""Callback related utils."""
import os
import tensorflow as tf
import horovod.tensorflow.keras.callbacks as hvd_callbacks
from wandb.keras import WandbCallback
from nvidia_tao_tf2.common.mlops.wandb import is_wandb_initialized
from nvidia_tao_tf2.cv.efficientdet.callback.eff_ema_checkpoint import EffEmaCheckpoint
from nvidia_tao_tf2.cv.efficientdet.callback.eff_checkpoint import EffCheckpoint
from nvidia_tao_tf2.cv.efficientdet.callback.eval_callback import COCOEvalCallback
from nvidia_tao_tf2.cv.efficientdet.callback.lr_tensorboard import LRTensorBoard
from nvidia_tao_tf2.cv.efficientdet.callback.logging_callback import MetricLogging
from nvidia_tao_tf2.cv.efficientdet.callback.moving_average_callback import MovingAverageCallback
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import is_main_process
def get_callbacks(hparams, eval_dataset, steps_per_epoch,
eval_model=None, initial_epoch=0):
"""Get callbacks for given hparams."""
callbacks = [hvd_callbacks.BroadcastGlobalVariablesCallback(0)]
if is_main_process():
tb_callback = tf.keras.callbacks.TensorBoard(
log_dir=os.path.join(hparams['results_dir'], 'tb_events'),
profile_batch=0, histogram_freq=1)
callbacks.append(tb_callback)
# set up checkpointing callbacks
ckpt_dir = hparams['results_dir'] # no longer in `weights` dir
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir, exist_ok=True)
if hparams['moving_average_decay'] > 0:
ckpt_callback = EffEmaCheckpoint(
eff_dir=ckpt_dir,
encryption_key=hparams['encryption_key'],
update_weights=False,
amp=hparams['mixed_precision'],
verbose=0,
save_freq='epoch',
save_weights_only=True,
period=hparams['checkpoint_interval'],
is_qat=hparams['qat'])
else:
ckpt_callback = EffCheckpoint(
eff_dir=ckpt_dir,
encryption_key=hparams['encryption_key'],
verbose=0,
save_freq='epoch',
save_weights_only=True,
period=hparams['checkpoint_interval'],
is_qat=hparams['qat'])
callbacks.append(ckpt_callback)
model_callback = EffCheckpoint(
eff_dir=hparams['results_dir'],
encryption_key=hparams['encryption_key'],
graph_only=True,
verbose=0,
save_freq='epoch',
save_weights_only=True,
period=hparams['checkpoint_interval'])
callbacks.append(model_callback)
# log LR in tensorboard
callbacks.append(
LRTensorBoard(
steps_per_epoch,
initial_epoch,
log_dir=os.path.join(hparams['results_dir'], 'tb_events')))
# status logging
callbacks.append(MetricLogging(hparams['num_epochs'], steps_per_epoch, initial_epoch))
# Setup the wandb logging callback if weights
# and biases have been initialized.
if is_wandb_initialized():
callbacks.append(WandbCallback())
cocoeval = COCOEvalCallback(
eval_dataset,
eval_model=eval_model,
eval_freq=hparams['checkpoint_interval'],
start_eval_epoch=hparams['eval_start'],
hparams=hparams)
callbacks.append(cocoeval)
if hparams['moving_average_decay']:
callbacks.append(MovingAverageCallback())
return callbacks
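# --- Illustrative sketch (added for this write-up; not part of the original TAO source) ---
# Minimal dictionary of the hparams keys that get_callbacks reads; every value
# below is a hypothetical example rather than a TAO default.
if __name__ == '__main__':
    example_hparams = {
        'results_dir': '/tmp/efficientdet_results',
        'encryption_key': 'example_key',
        'moving_average_decay': 0.9999,
        'mixed_precision': False,
        'checkpoint_interval': 10,
        'qat': False,
        'num_epochs': 300,
        'eval_start': 10,
    }
    print('get_callbacks expects hparams keys:', sorted(example_hparams))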
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/model/callback_builder.py |