# File: models-master/research/object_detection/core/data_decoder.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data decoders.
Data decoders decode the input data and return a dictionary of tensors keyed by
the entries in core.reader.Fields.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
class DataDecoder(six.with_metaclass(ABCMeta, object)):
"""Interface for data decoders."""
@abstractmethod
def decode(self, data):
"""Return a single image and associated labels.
Args:
data: a string tensor holding a serialized protocol buffer corresponding
to data for a single image.
Returns:
tensor_dict: a dictionary containing tensors. Possible keys are defined in
reader.Fields.
"""
pass
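# --- Illustrative sketch, not part of the original file ---
# A minimal concrete DataDecoder, assuming the input is a serialized
# tf.train.Example with a single hypothetical 'image/encoded' JPEG feature.
# A real decoder would key its output dictionary by the entries in
# reader.Fields, as described in the interface docstring above.
import tensorflow.compat.v1 as tf  # example-only import, not in the original file


class JpegExampleDecoder(DataDecoder):
  """Hypothetical decoder for serialized tf.train.Example protos."""

  def decode(self, data):
    features = tf.parse_single_example(
        data, {'image/encoded': tf.FixedLenFeature([], tf.string)})
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return {'image': image}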
# File: models-master/research/object_detection/core/preprocessor.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The preprocess function receives a tensor_dict which is a dictionary that maps
different field names to their tensors. For example,
tensor_dict[fields.InputDataFields.image] holds the image tensor.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1].
To preprocess multiple images with the same operations in cases where
nondeterministic operations are used, a preprocessor_cache.PreprocessorCache
object can be passed into the preprocess function or individual operations.
All nondeterministic operations except random_jitter_boxes support caching.
E.g.
Let tensor_dict{1,2,3,4,5} be copies of the same inputs.
Let preprocess_options contain nondeterministic operation(s) excluding
random_jitter_boxes.
cache1 = preprocessor_cache.PreprocessorCache()
cache2 = preprocessor_cache.PreprocessorCache()
a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1)
b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1)
c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2)
d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2)
e = preprocess(tensor_dict5, preprocess_options)
Then the corresponding tensors of object pairs (a,b) and (c,d)
are guaranteed to be equal element-wise, but the equality of any other object
pair cannot be determined.
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import math
import sys
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import preprocessor_cache
from object_detection.core import standard_fields as fields
from object_detection.utils import autoaugment_utils
from object_detection.utils import ops
from object_detection.utils import patch_ops
from object_detection.utils import shape_utils
def _apply_with_random_selector(x,
func,
num_cases,
preprocess_vars_cache=None,
key=''):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
If both preprocess_vars_cache AND key are the same between two calls, sel will
be the same value in both calls.
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: variable identifier for preprocess_vars_cache.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
generator_func = functools.partial(
tf.random_uniform, [], maxval=num_cases, dtype=tf.int32)
rand_sel = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.SELECTOR,
preprocess_vars_cache, key)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([func(
control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)
for case in range(num_cases)])[0]
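# --- Illustrative sketch, not part of the original file ---
# Example use of _apply_with_random_selector: randomly picking one of two
# brightness deltas for an image. `_example_random_brightness` is hypothetical
# and assumes `image` is a float32 [height, width, 3] tensor.
def _example_random_brightness(image, preprocess_vars_cache=None):
  def brightness_by_case(img, case):
    # `case` arrives as a Python int in [0, num_cases).
    return tf.image.adjust_brightness(img, delta=0.1 * (case + 1))
  return _apply_with_random_selector(
      image, brightness_by_case, num_cases=2,
      preprocess_vars_cache=preprocess_vars_cache, key='example_brightness')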
def _apply_with_random_selector_tuples(x,
func,
num_cases,
preprocess_vars_cache=None,
key=''):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
If both preprocess_vars_cache AND key are the same between two calls, sel will
be the same value in both calls.
Args:
x: A tuple of input tensors.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: variable identifier for preprocess_vars_cache.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
num_inputs = len(x)
generator_func = functools.partial(
tf.random_uniform, [], maxval=num_cases, dtype=tf.int32)
rand_sel = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES,
preprocess_vars_cache, key)
# Pass the real x only to one of the func calls.
tuples = [list() for t in x]
for case in range(num_cases):
new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
output = func(tuple(new_x), case)
for j in range(num_inputs):
tuples[j].append(output[j])
for i in range(num_inputs):
tuples[i] = control_flow_ops.merge(tuples[i])[0]
return tuple(tuples)
def _get_or_create_preprocess_rand_vars(generator_func,
function_id,
preprocess_vars_cache,
key=''):
"""Returns a tensor stored in preprocess_vars_cache or using generator_func.
If the tensor was previously generated and appears in the PreprocessorCache,
the previously generated tensor will be returned. Otherwise, a new tensor
is generated using generator_func and stored in the cache.
Args:
generator_func: A 0-argument function that generates a tensor.
function_id: identifier for the preprocessing function used.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
key: identifier for the variable stored.
Returns:
The generated tensor.
"""
if preprocess_vars_cache is not None:
var = preprocess_vars_cache.get(function_id, key)
if var is None:
var = generator_func()
preprocess_vars_cache.update(function_id, key, var)
else:
var = generator_func()
return var
def _random_integer(minval, maxval, seed):
"""Returns a random 0-D tensor between minval and maxval.
Args:
minval: minimum value of the random tensor.
maxval: maximum value of the random tensor.
seed: random seed.
Returns:
A random 0-D tensor between minval and maxval.
"""
return tf.random_uniform(
[], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)
# TODO(mttang): This method is needed because the current
# tf.image.rgb_to_grayscale method does not support quantization. Replace with
# tf.image.rgb_to_grayscale after quantization support is added.
def _rgb_to_grayscale(images, name=None):
"""Converts one or more images from RGB to Grayscale.
Outputs a tensor of the same `DType` and rank as `images`. The size of the
last dimension of the output is 1, containing the Grayscale value of the
pixels.
Args:
images: The RGB tensor to convert. Last dimension must have size 3 and
should contain RGB values.
name: A name for the operation (optional).
Returns:
The converted grayscale image(s).
"""
with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name:
images = tf.convert_to_tensor(images, name='images')
# Remember original dtype so we can convert back if needed
orig_dtype = images.dtype
flt_image = tf.image.convert_image_dtype(images, tf.float32)
# Reference for converting between RGB and grayscale.
# https://en.wikipedia.org/wiki/Luma_%28video%29
rgb_weights = [0.2989, 0.5870, 0.1140]
rank_1 = tf.expand_dims(tf.rank(images) - 1, 0)
gray_float = tf.reduce_sum(
flt_image * rgb_weights, rank_1, keep_dims=True)
gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name)
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
range to the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1
image -> [height, width, channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.cast(image, dtype=tf.float32)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
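# --- Illustrative sketch, not part of the original file ---
# Mapping a [0, 255] image into the [-1, 1] range, a common normalization for
# detection backbones. The argument values below are examples, not defaults.
def _example_normalize_to_unit_range(image):
  return normalize_image(
      image, original_minval=0, original_maxval=255,
      target_minval=-1.0, target_maxval=1.0)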
def retain_boxes_above_threshold(boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
threshold=0.0):
"""Retains boxes whose label weight is above a given threshold.
If the label weight for a box is missing (represented by NaN), the box is
retained. The boxes that don't pass the threshold will not appear in the
returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_weights: float32 tensor of shape [num_instance] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instance] representing the
confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
threshold: scalar python float.
Returns:
retained_boxes: [num_retained_instance, 4]
retained_labels: [num_retained_instance]
retained_label_weights: [num_retained_instance]
If multiclass_scores, masks, or keypoints are not None, the function also
returns:
retained_multiclass_scores: [num_retained_instance, num_classes]
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('RetainBoxesAboveThreshold',
values=[boxes, labels, label_weights]):
indices = tf.where(
tf.logical_or(label_weights > threshold, tf.is_nan(label_weights)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_weights = tf.gather(label_weights, indices)
result = [retained_boxes, retained_labels, retained_label_weights]
if label_confidences is not None:
retained_label_confidences = tf.gather(label_confidences, indices)
result.append(retained_label_confidences)
if multiclass_scores is not None:
retained_multiclass_scores = tf.gather(multiclass_scores, indices)
result.append(retained_multiclass_scores)
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if keypoints is not None:
retained_keypoints = tf.gather(keypoints, indices)
result.append(retained_keypoints)
return result
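# --- Illustrative sketch, not part of the original file ---
# Keeping only boxes whose label weight exceeds 0.5. The returned list contains
# one tensor per provided input, in the documented order (here: boxes, labels,
# label weights, masks).
def _example_retain_confident_boxes(boxes, labels, label_weights, masks):
  return retain_boxes_above_threshold(
      boxes, labels, label_weights, masks=masks, threshold=0.5)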
def drop_label_probabilistically(boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
dropped_label=None,
drop_probability=0.0,
seed=None):
"""Drops boxes of a certain label with probability drop_probability.
Boxes of the label dropped_label will not appear in the returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_weights: float32 tensor of shape [num_instance] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instance] representing the
confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
dropped_label: int32 id of label to drop.
drop_probability: float32 probability of dropping a label.
seed: random seed.
Returns:
retained_boxes: [num_retained_instance, 4]
retained_labels: [num_retained_instance]
retained_label_weights: [num_retained_instance]
If multiclass_scores, masks, or keypoints are not None, the function also
returns:
retained_multiclass_scores: [num_retained_instance, num_classes]
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('DropLabelProbabilistically',
values=[boxes, labels]):
indices = tf.where(
tf.logical_or(
tf.random_uniform(tf.shape(labels), seed=seed) > drop_probability,
tf.not_equal(labels, dropped_label)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_weights = tf.gather(label_weights, indices)
result = [retained_boxes, retained_labels, retained_label_weights]
if label_confidences is not None:
retained_label_confidences = tf.gather(label_confidences, indices)
result.append(retained_label_confidences)
if multiclass_scores is not None:
retained_multiclass_scores = tf.gather(multiclass_scores, indices)
result.append(retained_multiclass_scores)
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if keypoints is not None:
retained_keypoints = tf.gather(keypoints, indices)
result.append(retained_keypoints)
return result
def remap_labels(labels,
original_labels=None,
new_label=None):
"""Remaps labels that have an id in original_labels to new_label.
Args:
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
original_labels: int list of original labels that should be mapped from.
new_label: int label to map to.
Returns:
Remapped labels
"""
new_labels = labels
for original_label in original_labels:
change = tf.where(
tf.equal(new_labels, original_label),
tf.add(tf.zeros_like(new_labels), new_label - original_label),
tf.zeros_like(new_labels))
new_labels = tf.add(
new_labels,
change)
new_labels = tf.reshape(new_labels, tf.shape(labels))
return new_labels
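# --- Illustrative sketch, not part of the original file ---
# Collapsing two hypothetical fine-grained class ids (5 and 7) into a single
# id (3) with remap_labels.
def _example_merge_classes(labels):
  return remap_labels(labels, original_labels=[5, 7], new_label=3)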
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: Float32 tensor containing the bounding boxes -> [..., 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each last dimension is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
return flipped_boxes
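# Worked example (not part of the original file): a normalized box
# [ymin, xmin, ymax, xmax] = [0.1, 0.2, 0.5, 0.6] flipped left-right becomes
# [0.1, 1 - 0.6, 0.5, 1 - 0.2] = [0.1, 0.4, 0.5, 0.8]; the y coordinates are
# unchanged and the x extent is mirrored about the vertical image center.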
def _flip_boxes_up_down(boxes):
"""Up-down flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_ymin = tf.subtract(1.0, ymax)
flipped_ymax = tf.subtract(1.0, ymin)
flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
return flipped_boxes
def _rot90_boxes(boxes):
"""Rotate boxes counter-clockwise by 90 degrees.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Rotated boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
rotated_ymin = tf.subtract(1.0, xmax)
rotated_ymax = tf.subtract(1.0, xmin)
rotated_xmin = ymin
rotated_xmax = ymax
rotated_boxes = tf.concat(
[rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)
return rotated_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def _flip_masks_up_down(masks):
"""Up-down flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, ::-1, :]
def _rot90_masks(masks):
"""Rotate masks counter-clockwise by 90 degrees.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
rotated masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
masks = tf.transpose(masks, [0, 2, 1])
return masks[:, ::-1, :]
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_visibilities=None,
densepose_part_ids=None,
densepose_surface_coords=None,
keypoint_depths=None,
keypoint_depth_weights=None,
keypoint_flip_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly flips the image and detections horizontally.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
keypoint_depths: (optional) rank 2 float32 tensor with shape [num_instances,
num_keypoints] representing the relative depth of the
keypoints.
keypoint_depth_weights: (optional) rank 2 float32 tensor with shape
[num_instances, num_keypoints] representing the
weights of the relative depth of the keypoints.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, keypoint_visibilities,
keypoint_flip_permutation, densepose_part_ids, or densepose_surface_coords
are not None, the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
keypoint_depths: rank 2 float32 tensor with shape [num_instances,
num_keypoints]
keypoint_depth_weights: rank 2 float32 tensor with shape [num_instances,
num_keypoints].
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
ValueError: if exactly one of densepose_part_ids and densepose_surface_coords
is provided; they must either both be None or both be provided.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoint_flip_permutation is not provided')
if ((densepose_part_ids is not None and densepose_surface_coords is None) or
(densepose_part_ids is None and densepose_surface_coords is not None)):
raise ValueError(
'Must provide both `densepose_part_ids` and `densepose_surface_coords`')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_flip_random = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP,
preprocess_vars_cache)
do_a_flip_random = tf.less(do_a_flip_random, probability)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
# flip keypoint visibilities
if (keypoint_visibilities is not None and
keypoint_flip_permutation is not None):
kpt_flip_perm = keypoint_flip_permutation
keypoint_visibilities = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_visibilities, kpt_flip_perm, axis=1),
lambda: keypoint_visibilities)
result.append(keypoint_visibilities)
# flip DensePose parts and coordinates
if densepose_part_ids is not None:
flip_densepose_fn = functools.partial(
densepose_ops.flip_horizontal, densepose_part_ids,
densepose_surface_coords)
densepose_tensors = tf.cond(
do_a_flip_random,
flip_densepose_fn,
lambda: (densepose_part_ids, densepose_surface_coords))
result.extend(densepose_tensors)
# flip keypoint depths and weights.
if (keypoint_depths is not None and
keypoint_flip_permutation is not None):
kpt_flip_perm = keypoint_flip_permutation
keypoint_depths = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_depths, kpt_flip_perm, axis=1),
lambda: keypoint_depths)
keypoint_depth_weights = tf.cond(
do_a_flip_random,
lambda: tf.gather(keypoint_depth_weights, kpt_flip_perm, axis=1),
lambda: keypoint_depth_weights)
result.append(keypoint_depths)
result.append(keypoint_depth_weights)
return tuple(result)
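# --- Illustrative sketch, not part of the original file ---
# Flipping an image, its boxes, and 5 keypoints per instance. The permutation
# below is hypothetical; it must map each keypoint index to its left/right
# counterpart (e.g. swapping left and right eyes) so that flipped keypoints
# keep their semantics.
def _example_flip_with_keypoints(image, boxes, keypoints):
  flip_permutation = [0, 2, 1, 4, 3]  # example: indices 1<->2 and 3<->4 swap
  return random_horizontal_flip(
      image, boxes=boxes, keypoints=keypoints,
      keypoint_flip_permutation=flip_permutation)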
def random_vertical_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly flips the image and detections vertically.
The image is flipped with probability `probability` (50% by default).
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_up_down(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoint_flip_permutation is not provided')
with tf.name_scope('RandomVerticalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_flip_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP,
preprocess_vars_cache)
do_a_flip_random = tf.less(do_a_flip_random, probability)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
def random_rotation90(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_rot_permutation=None,
probability=0.5,
seed=None,
preprocess_vars_cache=None):
"""Randomly rotates the image and detections 90 degrees counter-clockwise.
The image is rotated with probability `probability` (50% by default). This can
be combined with
random_horizontal_flip and random_vertical_flip to produce an output with a
uniform distribution of the eight possible 90 degree rotation / reflection
combinations.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_rot_permutation: rank 1 int32 tensor containing the keypoint rotation
permutation.
probability: the probability of performing this augmentation.
seed: random seed
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
If boxes, masks, and keypoints are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def _rot90_image(image):
# rotate image 90 degrees counter-clockwise
image_rotated = tf.image.rot90(image)
return image_rotated
with tf.name_scope('RandomRotation90', values=[image, boxes]):
result = []
# random variable defining whether to rotate by 90 degrees or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_rot90_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.ROTATION90,
preprocess_vars_cache)
do_a_rot90_random = tf.less(do_a_rot90_random, probability)
# rotate image
image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image),
lambda: image)
result.append(image)
# rotate boxes
if boxes is not None:
boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes),
lambda: boxes)
result.append(boxes)
# rotate masks
if masks is not None:
masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks),
lambda: masks)
result.append(masks)
# rotate keypoints
if keypoints is not None:
keypoints = tf.cond(
do_a_rot90_random,
lambda: keypoint_ops.rot90(keypoints, keypoint_rot_permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
def random_pixel_value_scale(image,
minval=0.9,
maxval=1.1,
seed=None,
preprocess_vars_cache=None):
"""Scales each value in the pixels of the image.
This function scales each pixel independent of the other ones.
For each value in the image tensor, this function draws a random number
between minval and maxval and multiplies the value by it.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
minval: lower ratio of scaling pixel values.
maxval: upper ratio of scaling pixel values.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomPixelValueScale', values=[image]):
generator_func = functools.partial(
tf.random_uniform, tf.shape(image),
minval=minval, maxval=maxval,
dtype=tf.float32, seed=seed)
color_coef = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE,
preprocess_vars_cache)
image = tf.multiply(image, color_coef)
image = tf.clip_by_value(image, 0.0, 255.0)
return image
def random_image_scale(image,
masks=None,
min_scale_ratio=0.5,
max_scale_ratio=2.0,
seed=None,
preprocess_vars_cache=None):
"""Scales the image size.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels].
masks: (optional) rank 3 float32 tensor containing masks with
size [height, width, num_masks]. The value is set to None if there are no
masks.
min_scale_ratio: minimum scaling ratio.
max_scale_ratio: maximum scaling ratio.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
masks: If masks is not None, resized masks which are the same rank as the input
masks will be returned.
"""
with tf.name_scope('RandomImageScale', values=[image]):
result = []
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
generator_func = functools.partial(
tf.random_uniform, [],
minval=min_scale_ratio, maxval=max_scale_ratio,
dtype=tf.float32, seed=seed)
size_coef = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE,
preprocess_vars_cache)
image_newysize = tf.cast(
tf.multiply(tf.cast(image_height, dtype=tf.float32), size_coef),
dtype=tf.int32)
image_newxsize = tf.cast(
tf.multiply(tf.cast(image_width, dtype=tf.float32), size_coef),
dtype=tf.int32)
image = tf.image.resize_images(
image, [image_newysize, image_newxsize], align_corners=True)
result.append(image)
if masks is not None:
masks = tf.image.resize_images(
masks, [image_newysize, image_newxsize],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
result.append(masks)
return tuple(result)
def _augment_only_rgb_channels(image, augment_function):
"""Augments only the RGB slice of an image with additional channels."""
# Skipping the concat if possible reduces latency.
if image.shape[2] == 3:
return augment_function(image)
rgb_slice = image[:, :, :3]
augmented_rgb_slice = augment_function(rgb_slice)
image = tf.concat([augmented_rgb_slice, image[:, :, 3:]], -1)
return image
def random_rgb_to_gray(image,
probability=0.1,
seed=None,
preprocess_vars_cache=None):
"""Changes the image from RGB to Grayscale with the given probability.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
probability: the probability of returning a grayscale image.
The probability should be a number between [0, 1].
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
def _image_to_gray(image):
image_gray1 = _rgb_to_grayscale(image)
image_gray3 = tf.image.grayscale_to_rgb(image_gray1)
return image_gray3
with tf.name_scope('RandomRGBtoGray', values=[image]):
# random variable defining whether to change to grayscale or not
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_gray_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY,
preprocess_vars_cache)
image = tf.cond(
tf.greater(do_gray_random, probability), lambda: image,
lambda: _augment_only_rgb_channels(image, _image_to_gray))
return image
def adjust_gamma(image, gamma=1.0, gain=1.0):
"""Adjusts the gamma.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
gamma: the gamma value. Must be a non-negative real number.
gain: a constant multiplier.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('AdjustGamma', values=[image]):
def _adjust_gamma(image):
image = tf.image.adjust_gamma(image / 255, gamma, gain) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_gamma)
return image
def random_adjust_brightness(image,
max_delta=0.2,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts brightness.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
max_delta: how much to change the brightness. A value between [0, 1).
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustBrightness', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
-max_delta, max_delta, seed=seed)
delta = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS,
preprocess_vars_cache)
def _adjust_brightness(image):
image = tf.image.adjust_brightness(image / 255, delta) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_brightness)
return image
def random_adjust_contrast(image,
min_delta=0.8,
max_delta=1.25,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts contrast.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
min_delta: see max_delta.
max_delta: how much to change the contrast. Contrast will change with a
value between min_delta and max_delta. This value will be
multiplied to the current contrast of the image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustContrast', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
min_delta, max_delta, seed=seed)
contrast_factor = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST,
preprocess_vars_cache)
def _adjust_contrast(image):
image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_contrast)
return image
def random_adjust_hue(image,
max_delta=0.02,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts hue.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
max_delta: change hue randomly with a value between 0 and max_delta.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustHue', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
-max_delta, max_delta, seed=seed)
delta = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE,
preprocess_vars_cache)
def _adjust_hue(image):
image = tf.image.adjust_hue(image / 255, delta) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_hue)
return image
def random_adjust_saturation(image,
min_delta=0.8,
max_delta=1.25,
seed=None,
preprocess_vars_cache=None):
"""Randomly adjusts saturation.
Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
min_delta: see max_delta.
max_delta: how much to change the saturation. Saturation will change with a
value between min_delta and max_delta. This value will be
multiplied to the current saturation of the image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('RandomAdjustSaturation', values=[image]):
generator_func = functools.partial(tf.random_uniform, [],
min_delta, max_delta, seed=seed)
saturation_factor = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADJUST_SATURATION,
preprocess_vars_cache)
def _adjust_saturation(image):
image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255
image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
return image
image = _augment_only_rgb_channels(image, _adjust_saturation)
return image
def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None):
"""Randomly distorts color.
Randomly distorts color using a combination of brightness, hue, contrast and
saturation changes. Makes sure the output image is still between 0 and 255.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
color_ordering: Python int, a type of distortion (valid values: 0, 1).
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same shape as input image.
Raises:
ValueError: if color_ordering is not in {0, 1}.
"""
with tf.name_scope('RandomDistortColor', values=[image]):
if color_ordering == 0:
image = random_adjust_brightness(
image, max_delta=32. / 255.,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_saturation(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_hue(
image, max_delta=0.2,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_contrast(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
elif color_ordering == 1:
image = random_adjust_brightness(
image, max_delta=32. / 255.,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_contrast(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_saturation(
image, min_delta=0.5, max_delta=1.5,
preprocess_vars_cache=preprocess_vars_cache)
image = random_adjust_hue(
image, max_delta=0.2,
preprocess_vars_cache=preprocess_vars_cache)
else:
raise ValueError('color_ordering must be in {0, 1}')
return image
def random_jitter_boxes(boxes, ratio=0.05, jitter_mode='default', seed=None):
"""Randomly jitters boxes in image.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
ratio: The ratio of the box width and height that the corners can jitter.
For example if the width is 100 pixels and ratio is 0.05,
the corners can jitter up to 5 pixels in the x direction.
jitter_mode: One of
shrink - Only shrinks boxes.
expand - Only expands boxes.
expand_symmetric - Expands the boxes symmetrically along height and width
dimensions without changing the box center. The ratios of expansion
along X, Y dimensions are independent
shrink_symmetric - Shrinks the boxes symmetrically along height and width
dimensions without changing the box center. The ratios of shrinking
along X, Y dimensions are independent
expand_symmetric_xy - Expands the boxes symmetrically along height and
width dimensions and the ratio of expansion is the same for both.
shrink_symmetric_xy - Shrinks the boxes symmetrically along height and
width dimensions and the ratio of shrinking is the same for both.
default - Randomly and independently perturbs each box boundary.
seed: random seed.
Returns:
boxes: boxes which is the same shape as input boxes.
"""
with tf.name_scope('RandomJitterBoxes'):
ymin, xmin, ymax, xmax = (boxes[:, i] for i in range(4))
blist = box_list.BoxList(boxes)
ycenter, xcenter, height, width = blist.get_center_coordinates_and_sizes()
height = tf.maximum(tf.abs(height), 1e-6)
width = tf.maximum(tf.abs(width), 1e-6)
if jitter_mode in ['shrink', 'shrink_symmetric', 'shrink_symmetric_xy']:
min_ratio, max_ratio = -ratio, 0
elif jitter_mode in ['expand', 'expand_symmetric', 'expand_symmetric_xy']:
min_ratio, max_ratio = 0, ratio
elif jitter_mode == 'default':
min_ratio, max_ratio = -ratio, ratio
else:
raise ValueError('Unknown jitter mode - %s' % jitter_mode)
num_boxes = tf.shape(boxes)[0]
if jitter_mode in ['expand_symmetric', 'shrink_symmetric',
'expand_symmetric_xy', 'shrink_symmetric_xy']:
distortion = 1.0 + tf.random.uniform(
[num_boxes, 2], minval=min_ratio, maxval=max_ratio, dtype=tf.float32,
seed=seed)
height_distortion = distortion[:, 0]
width_distortion = distortion[:, 1]
# This is to ensure that all boxes are augmented symmetrically. We clip
# each boundary to lie within the image, and when doing so, we also
# adjust its symmetric counterpart.
max_height_distortion = tf.abs(tf.minimum(
(2.0 * ycenter) / height, 2.0 * (1 - ycenter) / height))
max_width_distortion = tf.abs(tf.minimum(
(2.0 * xcenter) / width, 2.0 * (1 - xcenter) / width))
if jitter_mode in ['expand_symmetric_xy', 'shrink_symmetric_xy']:
height_distortion = width_distortion = distortion[:, 0]
max_height_distortion = max_width_distortion = (
tf.minimum(max_width_distortion, max_height_distortion))
height_distortion = tf.clip_by_value(
height_distortion, -max_height_distortion, max_height_distortion)
width_distortion = tf.clip_by_value(
width_distortion, -max_width_distortion, max_width_distortion)
ymin = ycenter - (height * height_distortion / 2.0)
ymax = ycenter + (height * height_distortion / 2.0)
xmin = xcenter - (width * width_distortion / 2.0)
xmax = xcenter + (width * width_distortion / 2.0)
elif jitter_mode in ['expand', 'shrink', 'default']:
distortion = 1.0 + tf.random.uniform(
[num_boxes, 4], minval=min_ratio, maxval=max_ratio, dtype=tf.float32,
seed=seed)
ymin_jitter = height * distortion[:, 0]
xmin_jitter = width * distortion[:, 1]
ymax_jitter = height * distortion[:, 2]
xmax_jitter = width * distortion[:, 3]
ymin, ymax = ycenter - (ymin_jitter / 2.0), ycenter + (ymax_jitter / 2.0)
xmin, xmax = xcenter - (xmin_jitter / 2.0), xcenter + (xmax_jitter / 2.0)
else:
raise ValueError('Unknown jitter mode - %s' % jitter_mode)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return tf.clip_by_value(boxes, 0.0, 1.0)
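# --- Illustrative sketch, not part of the original file ---
# Expanding boxes by up to 10% of their width/height, symmetrically about each
# box center and with the same ratio along both axes.
def _example_jitter_boxes_symmetric(boxes):
  return random_jitter_boxes(
      boxes, ratio=0.1, jitter_mode='expand_symmetric_xy')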
def _strict_random_crop_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
mask_weights=None,
keypoints=None,
keypoint_visibilities=None,
densepose_num_points=None,
densepose_part_ids=None,
densepose_surface_coords=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
preprocess_vars_cache=None):
"""Performs random crop.
Note: Keypoint coordinates that are outside the crop will be set to NaN, which
is consistent with the original keypoint encoding for non-existing keypoints.
This function always crops the image and is supposed to be used by
`random_crop_image` function which sometimes returns the image unchanged.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
mask_weights: (optional) rank 1 float32 tensor with shape [num_instances]
with instance masks weights.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_num_points: (optional) rank 1 int32 tensor with shape
[num_instances] with the number of sampled points per
instance.
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If label_weights, multiclass_scores, masks, mask_weights, keypoints,
keypoint_visibilities, densepose_num_points, densepose_part_ids, or
densepose_surface_coords is not None, the function also returns:
label_weights: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
mask_weights: rank 1 float32 tensor with shape [num_instances] with mask
weights.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints]
densepose_num_points: rank 1 int32 tensor with shape [num_instances].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
Raises:
ValueError: If some but not all of the DensePose tensors are provided.
"""
with tf.name_scope('RandomCropImage', values=[image, boxes]):
densepose_tensors = [densepose_num_points, densepose_part_ids,
densepose_surface_coords]
if (any(t is not None for t in densepose_tensors) and
not all(t is not None for t in densepose_tensors)):
raise ValueError('If cropping DensePose labels, must provide '
'`densepose_num_points`, `densepose_part_ids`, and '
'`densepose_surface_coords`')
image_shape = tf.shape(image)
# boxes are [N, 4]. Let's first make them [N, 1, 4].
boxes_expanded = tf.expand_dims(
tf.clip_by_value(
boxes, clip_value_min=0.0, clip_value_max=1.0), 1)
generator_func = functools.partial(
tf.image.sample_distorted_bounding_box,
image_shape,
bounding_boxes=boxes_expanded,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
# for ssd cropping, each value of min_object_covered has its own
# cached random variable
sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE,
preprocess_vars_cache, key=min_object_covered)
im_box_begin, im_box_size, im_box = sample_distorted_bounding_box
im_box_end = im_box_begin + im_box_size
new_image = image[im_box_begin[0]:im_box_end[0],
im_box_begin[1]:im_box_end[1], :]
new_image.set_shape([None, None, image.get_shape()[2]])
# [1, 4]
im_box_rank2 = tf.squeeze(im_box, axis=[0])
# [4]
im_box_rank1 = tf.squeeze(im_box)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
if label_weights is not None:
boxlist.add_field('label_weights', label_weights)
if label_confidences is not None:
boxlist.add_field('label_confidences', label_confidences)
if multiclass_scores is not None:
boxlist.add_field('multiclass_scores', multiclass_scores)
im_boxlist = box_list.BoxList(im_box_rank2)
# remove boxes that are outside cropped image
boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
boxlist, im_box_rank1)
# remove boxes that do not sufficiently overlap the cropped window
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box_rank1)
new_boxes = new_boxlist.get()
if clip_boxes:
new_boxes = tf.clip_by_value(
new_boxes, clip_value_min=0.0, clip_value_max=1.0)
result = [new_image, new_boxes, new_labels]
if label_weights is not None:
new_label_weights = overlapping_boxlist.get_field('label_weights')
result.append(new_label_weights)
if label_confidences is not None:
new_label_confidences = overlapping_boxlist.get_field('label_confidences')
result.append(new_label_confidences)
if multiclass_scores is not None:
new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores')
result.append(new_multiclass_scores)
if masks is not None:
masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
masks_of_boxes_completely_inside_window = tf.gather(
masks_of_boxes_inside_window, keep_ids)
new_masks = masks_of_boxes_completely_inside_window[:, im_box_begin[
0]:im_box_end[0], im_box_begin[1]:im_box_end[1]]
result.append(new_masks)
if mask_weights is not None:
mask_weights_inside_window = tf.gather(mask_weights, inside_window_ids)
mask_weights_completely_inside_window = tf.gather(
mask_weights_inside_window, keep_ids)
result.append(mask_weights_completely_inside_window)
if keypoints is not None:
keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)
keypoints_of_boxes_completely_inside_window = tf.gather(
keypoints_of_boxes_inside_window, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_of_boxes_completely_inside_window, im_box_rank1)
if clip_boxes:
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
if keypoint_visibilities is not None:
kpt_vis_of_boxes_inside_window = tf.gather(keypoint_visibilities,
inside_window_ids)
kpt_vis_of_boxes_completely_inside_window = tf.gather(
kpt_vis_of_boxes_inside_window, keep_ids)
      if clip_boxes:
        # Set any keypoints with NaN coordinates to invisible.
        new_kpt_visibilities = keypoint_ops.set_keypoint_visibilities(
            new_keypoints, kpt_vis_of_boxes_completely_inside_window)
        result.append(new_kpt_visibilities)
      else:
        # Without clipping, keypoints are not pruned, so the gathered
        # visibilities can be used directly.
        result.append(kpt_vis_of_boxes_completely_inside_window)
if densepose_num_points is not None:
filtered_dp_tensors = []
for dp_tensor in densepose_tensors:
dp_tensor_inside_window = tf.gather(dp_tensor, inside_window_ids)
dp_tensor_completely_inside_window = tf.gather(dp_tensor_inside_window,
keep_ids)
filtered_dp_tensors.append(dp_tensor_completely_inside_window)
new_dp_num_points = filtered_dp_tensors[0]
new_dp_point_ids = filtered_dp_tensors[1]
new_dp_surf_coords = densepose_ops.change_coordinate_frame(
filtered_dp_tensors[2], im_box_rank1)
if clip_boxes:
new_dp_num_points, new_dp_point_ids, new_dp_surf_coords = (
densepose_ops.prune_outside_window(
new_dp_num_points, new_dp_point_ids, new_dp_surf_coords,
window=[0.0, 0.0, 1.0, 1.0]))
result.extend([new_dp_num_points, new_dp_point_ids, new_dp_surf_coords])
return tuple(result)
def random_crop_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
mask_weights=None,
keypoints=None,
keypoint_visibilities=None,
densepose_num_points=None,
densepose_part_ids=None,
densepose_surface_coords=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops the image.
Given the input image and its bounding boxes, this op randomly
crops a subimage. Given a user-provided set of input constraints,
the crop window is resampled until it satisfies these constraints.
If within 100 trials it is unable to find a valid crop, the original
image is returned. See the Args section for a description of the input
constraints. Both input boxes and returned Boxes are in normalized
form (e.g., lie in the unit square [0, 1]).
This function will return the original image with probability random_coef.
Note: Keypoint coordinates that are outside the crop will be set to NaN, which
is consistent with the original keypoint encoding for non-existing keypoints.
Also, the keypoint visibility will be set to False.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances].
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
mask_weights: (optional) rank 1 float32 tensor with shape [num_instances]
containing weights for each instance mask.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_visibilities: (optional) rank 2 bool tensor with shape
[num_instances, num_keypoints].
densepose_num_points: (optional) rank 1 int32 tensor with shape
[num_instances] with the number of sampled points per
instance.
densepose_part_ids: (optional) rank 2 int32 tensor with shape
[num_instances, num_points] holding the part id for each
sampled point. These part_ids are 0-indexed, where the
first non-background part has index 0.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[num_instances, num_points, 4]. The DensePose
coordinates are of the form (y, x, v, u) where
(y, x) are the normalized image coordinates for a
sampled point, and (v, u) is the surface
coordinate for the part.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
labels: new labels.
  If label_weights, multiclass_scores, masks, keypoints,
  keypoint_visibilities, densepose_num_points, densepose_part_ids, or
  densepose_surface_coords are not None, the function also returns:
label_weights: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
mask_weights: rank 1 float32 tensor with shape [num_instances].
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
keypoint_visibilities: rank 2 bool tensor with shape
[num_instances, num_keypoints]
densepose_num_points: rank 1 int32 tensor with shape [num_instances].
densepose_part_ids: rank 2 int32 tensor with shape
[num_instances, num_points].
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4].
"""
def strict_random_crop_image_fn():
return _strict_random_crop_image(
image,
boxes,
labels,
label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
mask_weights=mask_weights,
keypoints=keypoints,
keypoint_visibilities=keypoint_visibilities,
densepose_num_points=densepose_num_points,
densepose_part_ids=densepose_part_ids,
densepose_surface_coords=densepose_surface_coords,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
preprocess_vars_cache=preprocess_vars_cache)
  # Avoids tf.cond to speed up Faster R-CNN training on borg. See b/140057645.
if random_coef < sys.float_info.min:
result = strict_random_crop_image_fn()
else:
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_a_crop_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE,
preprocess_vars_cache)
do_a_crop_random = tf.greater(do_a_crop_random, random_coef)
outputs = [image, boxes, labels]
if label_weights is not None:
outputs.append(label_weights)
if label_confidences is not None:
outputs.append(label_confidences)
if multiclass_scores is not None:
outputs.append(multiclass_scores)
if masks is not None:
outputs.append(masks)
if mask_weights is not None:
outputs.append(mask_weights)
if keypoints is not None:
outputs.append(keypoints)
if keypoint_visibilities is not None:
outputs.append(keypoint_visibilities)
if densepose_num_points is not None:
outputs.extend([densepose_num_points, densepose_part_ids,
densepose_surface_coords])
result = tf.cond(do_a_crop_random, strict_random_crop_image_fn,
lambda: tuple(outputs))
return result
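

# Illustrative usage sketch (not part of the original module): a typical
# SSD-style call to random_crop_image. The image, boxes, labels, and parameter
# values below are placeholders chosen purely for demonstration.
def _example_random_crop_image_usage():
  """Returns the outputs of random_crop_image on a toy input (illustrative)."""
  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  boxes = tf.constant([[0.1, 0.1, 0.6, 0.5],
                       [0.3, 0.4, 0.9, 0.8]], dtype=tf.float32)
  labels = tf.constant([1, 2], dtype=tf.int32)
  label_weights = tf.constant([1.0, 1.0], dtype=tf.float32)
  # With random_coef=0.15 the original image is returned ~15% of the time.
  return random_crop_image(
      image,
      boxes,
      labels,
      label_weights,
      min_object_covered=0.5,
      aspect_ratio_range=(0.75, 1.33),
      area_range=(0.5, 1.0),
      overlap_thresh=0.3,
      random_coef=0.15)
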
def random_pad_image(image,
boxes,
masks=None,
keypoints=None,
densepose_surface_coords=None,
min_image_size=None,
max_image_size=None,
pad_color=None,
center_pad=False,
seed=None,
preprocess_vars_cache=None):
"""Randomly pads the image.
This function randomly pads the image with zeros. The final size of the
padded image will be between min_image_size and max_image_size.
  If min_image_size is smaller than the input image size, min_image_size will
  be set to the input image size; the same applies to max_image_size. The input
  image will be located at a uniformly random location inside the padded image.
The relative location of the boxes to the original image will remain the same.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[N, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[N, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[N, num_points, 4]. The DensePose coordinates are
of the form (y, x, v, u) where (y, x) are the
normalized image coordinates for a sampled point,
and (v, u) is the surface coordinate for the part.
min_image_size: a tensor of size [min_height, min_width], type tf.int32.
If passed as None, will be set to image size
[height, width].
max_image_size: a tensor of size [max_height, max_width], type tf.int32.
                    If passed as None, will be set to twice the
                    image size, i.e. [height * 2, width * 2].
pad_color: padding color. A rank 1 tensor of [channels] with dtype=
tf.float32. if set as None, it will be set to average color of
the input image.
center_pad: whether the original image will be padded to the center, or
randomly padded (which is default).
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [N, new_height, new_width]
if keypoints is not None, the function also returns:
keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2]
if densepose_surface_coords is not None, the function also returns:
densepose_surface_coords: rank 3 float32 tensor with shape
[num_instances, num_points, 4]
"""
if pad_color is None:
pad_color = tf.reduce_mean(image, axis=[0, 1])
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
if max_image_size is None:
max_image_size = tf.stack([image_height * 2, image_width * 2])
max_image_size = tf.maximum(max_image_size,
tf.stack([image_height, image_width]))
if min_image_size is None:
min_image_size = tf.stack([image_height, image_width])
min_image_size = tf.maximum(min_image_size,
tf.stack([image_height, image_width]))
target_height = tf.cond(
max_image_size[0] > min_image_size[0],
lambda: _random_integer(min_image_size[0], max_image_size[0], seed),
lambda: max_image_size[0])
target_width = tf.cond(
max_image_size[1] > min_image_size[1],
lambda: _random_integer(min_image_size[1], max_image_size[1], seed),
lambda: max_image_size[1])
offset_height = tf.cond(
target_height > image_height,
lambda: _random_integer(0, target_height - image_height, seed),
lambda: tf.constant(0, dtype=tf.int32))
offset_width = tf.cond(
target_width > image_width,
lambda: _random_integer(0, target_width - image_width, seed),
lambda: tf.constant(0, dtype=tf.int32))
if center_pad:
offset_height = tf.cast(tf.floor((target_height - image_height) / 2),
tf.int32)
offset_width = tf.cast(tf.floor((target_width - image_width) / 2),
tf.int32)
gen_func = lambda: (target_height, target_width, offset_height, offset_width)
params = _get_or_create_preprocess_rand_vars(
gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE,
preprocess_vars_cache)
target_height, target_width, offset_height, offset_width = params
new_image = tf.image.pad_to_bounding_box(
image,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
# Setting color of the padded pixels
image_ones = tf.ones_like(image)
image_ones_padded = tf.image.pad_to_bounding_box(
image_ones,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
image_color_padded = (1.0 - image_ones_padded) * pad_color
new_image += image_color_padded
# setting boxes
new_window = tf.cast(
tf.stack([
-offset_height, -offset_width, target_height - offset_height,
target_width - offset_width
]),
dtype=tf.float32)
new_window /= tf.cast(
tf.stack([image_height, image_width, image_height, image_width]),
dtype=tf.float32)
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
new_boxes = new_boxlist.get()
result = [new_image, new_boxes]
if masks is not None:
new_masks = tf.image.pad_to_bounding_box(
masks[:, :, :, tf.newaxis],
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)[:, :, :, 0]
result.append(new_masks)
if keypoints is not None:
new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window)
result.append(new_keypoints)
if densepose_surface_coords is not None:
new_densepose_surface_coords = densepose_ops.change_coordinate_frame(
densepose_surface_coords, new_window)
result.append(new_densepose_surface_coords)
return tuple(result)
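

# Illustrative usage sketch (not part of the original module): pads a toy image
# to at most twice its size, filling new pixels with the mean image color
# (pad_color=None). Shapes below are assumptions for demonstration only.
def _example_random_pad_image_usage():
  """Returns the outputs of random_pad_image on a toy input (illustrative)."""
  image = tf.zeros([300, 400, 3], dtype=tf.float32)
  boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
  # min_image_size / max_image_size default to [height, width] and
  # [2 * height, 2 * width] respectively when left as None.
  return random_pad_image(image, boxes)
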
def random_absolute_pad_image(image,
boxes,
masks=None,
keypoints=None,
densepose_surface_coords=None,
max_height_padding=None,
max_width_padding=None,
pad_color=None,
seed=None,
preprocess_vars_cache=None):
"""Randomly pads the image by small absolute amounts.
As random_pad_image above, but the padding is of size [0, max_height_padding]
or [0, max_width_padding] instead of padding to a fixed size of
max_height_padding for all images.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[N, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[N, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
densepose_surface_coords: (optional) rank 3 float32 tensor with shape
[N, num_points, 4]. The DensePose coordinates are
of the form (y, x, v, u) where (y, x) are the
normalized image coordinates for a sampled point,
and (v, u) is the surface coordinate for the part.
max_height_padding: a scalar tf.int32 tensor denoting the maximum amount of
height padding. The padding will be chosen uniformly at
random from [0, max_height_padding).
max_width_padding: a scalar tf.int32 tensor denoting the maximum amount of
width padding. The padding will be chosen uniformly at
random from [0, max_width_padding).
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the input
image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [N, new_height, new_width]
if keypoints is not None, the function also returns:
keypoints: rank 3 float32 tensor with shape [N, num_keypoints, 2]
"""
min_image_size = tf.shape(image)[:2]
max_image_size = min_image_size + tf.cast(
[max_height_padding, max_width_padding], dtype=tf.int32)
return random_pad_image(
image,
boxes,
masks=masks,
keypoints=keypoints,
densepose_surface_coords=densepose_surface_coords,
min_image_size=min_image_size,
max_image_size=max_image_size,
pad_color=pad_color,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
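

# Illustrative usage sketch (not part of the original module): pads a toy image
# by at most 32 pixels in each dimension. The values are assumptions chosen
# only to show the calling convention.
def _example_random_absolute_pad_image_usage():
  """Returns the outputs of random_absolute_pad_image on a toy input."""
  image = tf.zeros([300, 400, 3], dtype=tf.float32)
  boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
  return random_absolute_pad_image(
      image, boxes, max_height_padding=32, max_width_padding=32)
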
def random_crop_pad_image(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3,
clip_boxes=True,
random_coef=0.0,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
pad_color=None,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops and pads the image.
Given an input image and its bounding boxes, this op first randomly crops
the image and then randomly pads the image with background values. Parameters
  min_padded_size_ratio and max_padded_size_ratio determine the range of the
final output image size. Specifically, the final image size will have a size
in the range of min_padded_size_ratio * tf.shape(image) and
max_padded_size_ratio * tf.shape(image). Note that these ratios are with
respect to the size of the original image, so we can't capture the same
effect easily by independently applying RandomCropImage
followed by RandomPadImage.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 containing the label weights.
label_confidences: rank 1 float32 containing the label confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the randomly
cropped image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
padded_image: padded image.
padded_boxes: boxes which is the same rank as input boxes. Boxes are in
normalized form.
cropped_labels: cropped labels.
if label_weights is not None also returns:
cropped_label_weights: cropped label weights.
if multiclass_scores is not None also returns:
cropped_multiclass_scores: cropped_multiclass_scores.
"""
image_size = tf.shape(image)
image_height = image_size[0]
image_width = image_size[1]
result = random_crop_image(
image=image,
boxes=boxes,
labels=labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
cropped_image, cropped_boxes, cropped_labels = result[:3]
min_image_size = tf.cast(
tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) *
min_padded_size_ratio,
dtype=tf.int32)
max_image_size = tf.cast(
tf.cast(tf.stack([image_height, image_width]), dtype=tf.float32) *
max_padded_size_ratio,
dtype=tf.int32)
padded_image, padded_boxes = random_pad_image( # pylint: disable=unbalanced-tuple-unpacking
cropped_image,
cropped_boxes,
min_image_size=min_image_size,
max_image_size=max_image_size,
pad_color=pad_color,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
cropped_padded_output = (padded_image, padded_boxes, cropped_labels)
index = 3
if label_weights is not None:
cropped_label_weights = result[index]
cropped_padded_output += (cropped_label_weights,)
index += 1
if label_confidences is not None:
cropped_label_confidences = result[index]
cropped_padded_output += (cropped_label_confidences,)
index += 1
if multiclass_scores is not None:
cropped_multiclass_scores = result[index]
cropped_padded_output += (cropped_multiclass_scores,)
return cropped_padded_output
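

# Illustrative usage sketch (not part of the original module): crops and then
# pads a toy image so the final size lies between 1x and 2x the original
# dimensions. The inputs are placeholders for demonstration only.
def _example_random_crop_pad_image_usage():
  """Returns the outputs of random_crop_pad_image on a toy input."""
  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  boxes = tf.constant([[0.1, 0.1, 0.6, 0.5]], dtype=tf.float32)
  labels = tf.constant([1], dtype=tf.int32)
  label_weights = tf.constant([1.0], dtype=tf.float32)
  return random_crop_pad_image(
      image,
      boxes,
      labels,
      label_weights,
      min_padded_size_ratio=(1.0, 1.0),
      max_padded_size_ratio=(2.0, 2.0))
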
def random_crop_to_aspect_ratio(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
aspect_ratio=1.0,
overlap_thresh=0.3,
clip_boxes=True,
center_crop=False,
seed=None,
preprocess_vars_cache=None):
"""Randomly crops an image to the specified aspect ratio.
  Randomly crops a portion of the image such that the crop is of the
specified aspect ratio, and the crop is as large as possible. If the specified
aspect ratio is larger than the aspect ratio of the image, this op will
randomly remove rows from the top and bottom of the image. If the specified
aspect ratio is less than the aspect ratio of the image, this op will randomly
remove cols from the left and right of the image. If the specified aspect
ratio is the same as the aspect ratio of the image, this op will return the
image.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
aspect_ratio: the aspect ratio of cropped image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
center_crop: whether to take the center crop or a random crop.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If label_weights, masks, keypoints, or multiclass_scores is not None, the
function also returns:
label_weights: rank 1 float32 tensor with shape [num_instances].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
Raises:
ValueError: If image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('RandomCropToAspectRatio', values=[image]):
image_shape = tf.shape(image)
orig_height = image_shape[0]
orig_width = image_shape[1]
orig_aspect_ratio = tf.cast(
orig_width, dtype=tf.float32) / tf.cast(
orig_height, dtype=tf.float32)
new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)
def target_height_fn():
return tf.cast(
tf.round(tf.cast(orig_width, dtype=tf.float32) / new_aspect_ratio),
dtype=tf.int32)
target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio,
lambda: orig_height, target_height_fn)
def target_width_fn():
return tf.cast(
tf.round(tf.cast(orig_height, dtype=tf.float32) * new_aspect_ratio),
dtype=tf.int32)
target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio,
lambda: orig_width, target_width_fn)
    # either offset_height = 0 and offset_width is randomly chosen from
    # [0, orig_width - target_width), or else offset_width = 0 and
    # offset_height is randomly chosen from [0, orig_height - target_height)
if center_crop:
offset_height = tf.cast(tf.math.floor((orig_height - target_height) / 2),
tf.int32)
offset_width = tf.cast(tf.math.floor((orig_width - target_width) / 2),
tf.int32)
else:
offset_height = _random_integer(0, orig_height - target_height + 1, seed)
offset_width = _random_integer(0, orig_width - target_width + 1, seed)
generator_func = lambda: (offset_height, offset_width)
offset_height, offset_width = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO,
preprocess_vars_cache)
new_image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, target_height, target_width)
im_box = tf.stack([
tf.cast(offset_height, dtype=tf.float32) /
tf.cast(orig_height, dtype=tf.float32),
tf.cast(offset_width, dtype=tf.float32) /
tf.cast(orig_width, dtype=tf.float32),
tf.cast(offset_height + target_height, dtype=tf.float32) /
tf.cast(orig_height, dtype=tf.float32),
tf.cast(offset_width + target_width, dtype=tf.float32) /
tf.cast(orig_width, dtype=tf.float32)
])
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
boxlist.add_field('label_weights', label_weights)
if label_confidences is not None:
boxlist.add_field('label_confidences', label_confidences)
if multiclass_scores is not None:
boxlist.add_field('multiclass_scores', multiclass_scores)
im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))
# remove boxes whose overlap with the image is less than overlap_thresh
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_labels = overlapping_boxlist.get_field('labels')
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box)
if clip_boxes:
new_boxlist = box_list_ops.clip_to_window(
new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32))
new_boxes = new_boxlist.get()
result = [new_image, new_boxes, new_labels]
new_label_weights = overlapping_boxlist.get_field('label_weights')
result.append(new_label_weights)
if label_confidences is not None:
new_label_confidences = (
overlapping_boxlist.get_field('label_confidences'))
result.append(new_label_confidences)
if multiclass_scores is not None:
new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores')
result.append(new_multiclass_scores)
if masks is not None:
masks_inside_window = tf.gather(masks, keep_ids)
masks_box_begin = tf.stack([0, offset_height, offset_width])
masks_box_size = tf.stack([-1, target_height, target_width])
new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size)
result.append(new_masks)
if keypoints is not None:
keypoints_inside_window = tf.gather(keypoints, keep_ids)
new_keypoints = keypoint_ops.change_coordinate_frame(
keypoints_inside_window, im_box)
if clip_boxes:
new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,
[0.0, 0.0, 1.0, 1.0])
result.append(new_keypoints)
return tuple(result)
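

# Illustrative usage sketch (not part of the original module): center-crops a
# toy landscape image to a square aspect ratio. The inputs are placeholders
# for demonstration only.
def _example_random_crop_to_aspect_ratio_usage():
  """Returns the outputs of random_crop_to_aspect_ratio on a toy input."""
  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  boxes = tf.constant([[0.2, 0.3, 0.8, 0.7]], dtype=tf.float32)
  labels = tf.constant([1], dtype=tf.int32)
  label_weights = tf.constant([1.0], dtype=tf.float32)
  return random_crop_to_aspect_ratio(
      image, boxes, labels, label_weights, aspect_ratio=1.0, center_crop=True)
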
def random_pad_to_aspect_ratio(image,
boxes,
masks=None,
keypoints=None,
aspect_ratio=1.0,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
seed=None,
preprocess_vars_cache=None):
"""Randomly zero pads an image to the specified aspect ratio.
Pads the image so that the resulting image will have the specified aspect
ratio without scaling less than the min_padded_size_ratio or more than the
max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio
is lower than what is possible to maintain the aspect ratio, then this method
will use the least padding to achieve the specified aspect ratio.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
aspect_ratio: aspect ratio of the final image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
  If masks or keypoints is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: If image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('RandomPadToAspectRatio', values=[image]):
image_shape = tf.shape(image)
image_height = tf.cast(image_shape[0], dtype=tf.float32)
image_width = tf.cast(image_shape[1], dtype=tf.float32)
image_aspect_ratio = image_width / image_height
new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)
target_height = tf.cond(
image_aspect_ratio <= new_aspect_ratio,
lambda: image_height,
lambda: image_width / new_aspect_ratio)
target_width = tf.cond(
image_aspect_ratio >= new_aspect_ratio,
lambda: image_width,
lambda: image_height * new_aspect_ratio)
min_height = tf.maximum(
min_padded_size_ratio[0] * image_height, target_height)
min_width = tf.maximum(
min_padded_size_ratio[1] * image_width, target_width)
max_height = tf.maximum(
max_padded_size_ratio[0] * image_height, target_height)
max_width = tf.maximum(
max_padded_size_ratio[1] * image_width, target_width)
max_scale = tf.minimum(max_height / target_height, max_width / target_width)
min_scale = tf.minimum(
max_scale,
tf.maximum(min_height / target_height, min_width / target_width))
generator_func = functools.partial(tf.random_uniform, [],
min_scale, max_scale, seed=seed)
scale = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO,
preprocess_vars_cache)
target_height = tf.round(scale * target_height)
target_width = tf.round(scale * target_width)
new_image = tf.image.pad_to_bounding_box(
image, 0, 0, tf.cast(target_height, dtype=tf.int32),
tf.cast(target_width, dtype=tf.int32))
im_box = tf.stack([
0.0,
0.0,
target_height / image_height,
target_width / image_width
])
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box)
new_boxes = new_boxlist.get()
result = [new_image, new_boxes]
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, tf.cast(target_height, dtype=tf.int32),
tf.cast(target_width, dtype=tf.int32))
new_masks = tf.squeeze(new_masks, [-1])
result.append(new_masks)
if keypoints is not None:
new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box)
result.append(new_keypoints)
return tuple(result)
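

# Illustrative usage sketch (not part of the original module): zero pads a toy
# portrait image to a square aspect ratio. The inputs are placeholders for
# demonstration only.
def _example_random_pad_to_aspect_ratio_usage():
  """Returns the outputs of random_pad_to_aspect_ratio on a toy input."""
  image = tf.zeros([640, 480, 3], dtype=tf.float32)
  boxes = tf.constant([[0.2, 0.3, 0.8, 0.7]], dtype=tf.float32)
  return random_pad_to_aspect_ratio(image, boxes, aspect_ratio=1.0)
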
def random_black_patches(image,
max_black_patches=10,
probability=0.5,
size_to_image_ratio=0.1,
random_seed=None,
preprocess_vars_cache=None):
"""Randomly adds some black patches to the image.
This op adds up to max_black_patches square black patches of a fixed size
to the image where size is specified via the size_to_image_ratio parameter.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
max_black_patches: number of times that the function tries to add a
black box to the image.
probability: at each try, what is the chance of adding a box.
size_to_image_ratio: Determines the ratio of the size of the black patches
to the size of the image.
box_size = size_to_image_ratio *
min(image_width, image_height)
random_seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image
"""
def add_black_patch_to_image(image, idx):
"""Function for adding one patch to the image.
Args:
image: image
idx: counter for number of patches that could have been added
Returns:
image with a randomly added black box
"""
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
box_size = tf.cast(
tf.multiply(
tf.minimum(
tf.cast(image_height, dtype=tf.float32),
tf.cast(image_width, dtype=tf.float32)), size_to_image_ratio),
dtype=tf.int32)
generator_func = functools.partial(tf.random_uniform, [], minval=0.0,
maxval=(1.0 - size_to_image_ratio),
seed=random_seed)
normalized_y_min = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
preprocess_vars_cache, key=str(idx) + 'y')
normalized_x_min = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
preprocess_vars_cache, key=str(idx) + 'x')
y_min = tf.cast(
normalized_y_min * tf.cast(image_height, dtype=tf.float32),
dtype=tf.int32)
x_min = tf.cast(
normalized_x_min * tf.cast(image_width, dtype=tf.float32),
dtype=tf.int32)
black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)
mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,
image_height, image_width)
image = tf.multiply(image, mask)
return image
with tf.name_scope('RandomBlackPatchInImage', values=[image]):
for idx in range(max_black_patches):
generator_func = functools.partial(tf.random_uniform, [],
minval=0.0, maxval=1.0,
dtype=tf.float32, seed=random_seed)
random_prob = _get_or_create_preprocess_rand_vars(
generator_func,
preprocessor_cache.PreprocessorCache.BLACK_PATCHES,
preprocess_vars_cache, key=idx)
image = tf.cond(
tf.greater(random_prob, probability), lambda: image,
functools.partial(add_black_patch_to_image, image=image, idx=idx))
return image
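

# Illustrative usage sketch (not part of the original module): attempts to add
# up to five black patches, each sized to 10% of the smaller image dimension.
# The input image is a placeholder for demonstration only.
def _example_random_black_patches_usage():
  """Returns the output of random_black_patches on a toy input."""
  image = tf.ones([480, 640, 3], dtype=tf.float32)
  return random_black_patches(
      image, max_black_patches=5, probability=0.5, size_to_image_ratio=0.1)
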
def random_jpeg_quality(image,
min_jpeg_quality=0,
max_jpeg_quality=100,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly encode the image to a random JPEG quality level.
Args:
image: rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0, 255].
min_jpeg_quality: An int for the lower bound for selecting a random jpeg
quality level.
max_jpeg_quality: An int for the upper bound for selecting a random jpeg
quality level.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the encoded image,
and if it is 1.0, we will always get the original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this function is called
multiple times with the same non-null cache, it will perform
deterministically.
Returns:
image: image which is the same shape as input image.
"""
def _adjust_jpeg_quality():
"""Encodes the image as jpeg with a random quality and then decodes."""
generator_func = functools.partial(
tf.random_uniform, [],
minval=min_jpeg_quality,
maxval=max_jpeg_quality,
dtype=tf.int32,
seed=seed)
quality = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY,
preprocess_vars_cache, key='quality')
# Need to convert to uint8 before calling adjust_jpeg_quality since it
# assumes that float features are in the range [0, 1], where herein the
# range is [0, 255].
image_uint8 = tf.cast(image, tf.uint8)
adjusted_image = tf.image.adjust_jpeg_quality(image_uint8, quality)
return tf.cast(adjusted_image, tf.float32)
with tf.name_scope('RandomJpegQuality', values=[image]):
generator_func = functools.partial(tf.random_uniform, [], seed=seed)
do_encoding_random = _get_or_create_preprocess_rand_vars(
generator_func, preprocessor_cache.PreprocessorCache.JPEG_QUALITY,
preprocess_vars_cache)
do_encoding_random = tf.greater_equal(do_encoding_random, random_coef)
image = tf.cond(do_encoding_random, _adjust_jpeg_quality,
lambda: tf.cast(image, tf.float32))
return image
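

# Illustrative usage sketch (not part of the original module): re-encodes a toy
# image (values in [0, 255]) at a random JPEG quality between 50 and 95. The
# input values are assumptions for demonstration only.
def _example_random_jpeg_quality_usage():
  """Returns the output of random_jpeg_quality on a toy input."""
  image = 255.0 * tf.ones([480, 640, 3], dtype=tf.float32)
  return random_jpeg_quality(image, min_jpeg_quality=50, max_jpeg_quality=95)
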
def random_downscale_to_target_pixels(image,
masks=None,
min_target_pixels=300000,
max_target_pixels=800000,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly downscales the image to a target number of pixels.
  If the image contains fewer pixels than the chosen target number, it will
  not be downscaled.
Args:
image: Rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0, 255].
masks: (optional) Rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
    min_target_pixels: Integer. An inclusive lower bound for the target
      number of pixels.
    max_target_pixels: Integer. An exclusive upper bound for the target
      number of pixels.
random_coef: Float. Random coefficient that defines the chance of getting
the original image. If random_coef is 0, we will always apply downscaling,
and if it is 1.0, we will always get the original image.
seed: (optional) Integer. Random seed.
preprocess_vars_cache: (optional) PreprocessorCache object that records
previously performed augmentations. Updated in-place. If this function is
called multiple times with the same non-null cache, it will perform
deterministically.
Returns:
Tuple with elements:
image: Resized image which is the same rank as input image.
masks: If masks is not None, resized masks which are the same rank as
the input masks.
Raises:
ValueError: If min_target_pixels or max_target_pixels are not positive.
"""
if min_target_pixels <= 0:
raise ValueError('Minimum target pixels must be positive')
if max_target_pixels <= 0:
raise ValueError('Maximum target pixels must be positive')
def _resize_image_to_target(target_height, target_width):
# pylint: disable=unbalanced-tuple-unpacking
new_image, _ = resize_image(image, None, target_height, target_width)
return (new_image,)
def _resize_image_and_masks_to_target(target_height, target_width):
# pylint: disable=unbalanced-tuple-unpacking
new_image, new_masks, _ = resize_image(image, masks, target_height,
target_width)
return new_image, new_masks
with tf.name_scope('RandomDownscaleToTargetPixels', values=[image]):
generator_fn = functools.partial(tf.random_uniform, [], seed=seed)
do_downscale_random = _get_or_create_preprocess_rand_vars(
generator_fn,
preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS,
preprocess_vars_cache)
do_downscale_random = tf.greater_equal(do_downscale_random, random_coef)
generator_fn = functools.partial(
tf.random_uniform, [],
minval=min_target_pixels,
maxval=max_target_pixels,
dtype=tf.int32,
seed=seed)
target_pixels = _get_or_create_preprocess_rand_vars(
generator_fn,
preprocessor_cache.PreprocessorCache.DOWNSCALE_TO_TARGET_PIXELS,
preprocess_vars_cache,
key='target_pixels')
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
image_pixels = image_height * image_width
scale_factor = tf.sqrt(
tf.cast(target_pixels, dtype=tf.float32) /
tf.cast(image_pixels, dtype=tf.float32))
target_height = tf.cast(
scale_factor * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32)
target_width = tf.cast(
scale_factor * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32)
image_larger_than_target = tf.greater(image_pixels, target_pixels)
should_apply_resize = tf.logical_and(do_downscale_random,
image_larger_than_target)
if masks is not None:
resize_fn = functools.partial(_resize_image_and_masks_to_target,
target_height, target_width)
return tf.cond(should_apply_resize, resize_fn,
lambda: (tf.cast(image, dtype=tf.float32), masks))
else:
resize_fn = lambda: _resize_image_to_target(target_height, target_width)
return tf.cond(should_apply_resize, resize_fn,
lambda: (tf.cast(image, dtype=tf.float32),))
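

# Illustrative usage sketch (not part of the original module): downscales a toy
# image (values in [0, 255]) to roughly 300k-800k pixels whenever it is larger
# than the sampled target. The input is a placeholder for demonstration only.
def _example_random_downscale_usage():
  """Returns the output of random_downscale_to_target_pixels on a toy input."""
  image = 255.0 * tf.ones([1080, 1920, 3], dtype=tf.float32)
  return random_downscale_to_target_pixels(
      image, min_target_pixels=300000, max_target_pixels=800000)
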
def random_patch_gaussian(image,
min_patch_size=1,
max_patch_size=250,
min_gaussian_stddev=0.0,
max_gaussian_stddev=1.0,
random_coef=0.0,
seed=None,
preprocess_vars_cache=None):
"""Randomly applies gaussian noise to a random patch on the image.
The gaussian noise is applied to the image with values scaled to the range
[0.0, 1.0]. The result of applying gaussian noise to the scaled image is
clipped to be within the range [0.0, 1.0], equivalent to the range
[0.0, 255.0] after rescaling the image back.
See "Improving Robustness Without Sacrificing Accuracy with Patch Gaussian
Augmentation " by Lopes et al., 2019, for further details.
https://arxiv.org/abs/1906.02611
Args:
image: Rank 3 float32 tensor with shape [height, width, channels] and
values in the range [0.0, 255.0].
min_patch_size: Integer. An inclusive lower bound for the patch size.
max_patch_size: Integer. An exclusive upper bound for the patch size.
min_gaussian_stddev: Float. An inclusive lower bound for the standard
deviation of the gaussian noise.
max_gaussian_stddev: Float. An exclusive upper bound for the standard
deviation of the gaussian noise.
random_coef: Float. Random coefficient that defines the chance of getting
      the original image. If random_coef is 0.0, we will always apply the
      patch gaussian augmentation, and if it is 1.0, we will always get the
      original image.
seed: (optional) Integer. Random seed.
preprocess_vars_cache: (optional) PreprocessorCache object that records
previously performed augmentations. Updated in-place. If this function is
called multiple times with the same non-null cache, it will perform
deterministically.
Returns:
Rank 3 float32 tensor with same shape as the input image and with gaussian
noise applied within a random patch.
Raises:
ValueError: If min_patch_size is < 1.
"""
if min_patch_size < 1:
raise ValueError('Minimum patch size must be >= 1.')
get_or_create_rand_vars_fn = functools.partial(
_get_or_create_preprocess_rand_vars,
function_id=preprocessor_cache.PreprocessorCache.PATCH_GAUSSIAN,
preprocess_vars_cache=preprocess_vars_cache)
def _apply_patch_gaussian(image):
"""Applies a patch gaussian with random size, location, and stddev."""
patch_size = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=min_patch_size,
maxval=max_patch_size,
dtype=tf.int32,
seed=seed),
key='patch_size')
gaussian_stddev = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=min_gaussian_stddev,
maxval=max_gaussian_stddev,
dtype=tf.float32,
seed=seed),
key='gaussian_stddev')
image_shape = tf.shape(image)
y = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=0,
maxval=image_shape[0],
dtype=tf.int32,
seed=seed),
key='y')
x = get_or_create_rand_vars_fn(
functools.partial(
tf.random_uniform, [],
minval=0,
maxval=image_shape[1],
dtype=tf.int32,
seed=seed),
key='x')
gaussian = get_or_create_rand_vars_fn(
functools.partial(
tf.random.normal,
image_shape,
stddev=gaussian_stddev,
dtype=tf.float32,
seed=seed),
key='gaussian')
scaled_image = image / 255.0
image_plus_gaussian = tf.clip_by_value(scaled_image + gaussian, 0.0, 1.0)
patch_mask = patch_ops.get_patch_mask(y, x, patch_size, image_shape)
patch_mask = tf.expand_dims(patch_mask, -1)
patch_mask = tf.tile(patch_mask, [1, 1, image_shape[2]])
patched_image = tf.where(patch_mask, image_plus_gaussian, scaled_image)
return patched_image * 255.0
with tf.name_scope('RandomPatchGaussian', values=[image]):
image = tf.cast(image, tf.float32)
patch_gaussian_random = get_or_create_rand_vars_fn(
functools.partial(tf.random_uniform, [], seed=seed))
do_patch_gaussian = tf.greater_equal(patch_gaussian_random, random_coef)
image = tf.cond(do_patch_gaussian,
lambda: _apply_patch_gaussian(image),
lambda: image)
return image
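

# Illustrative usage sketch (not part of the original module): applies gaussian
# noise to a random patch of a toy image with values in [0, 255]. The input and
# parameter values are placeholders for demonstration only.
def _example_random_patch_gaussian_usage():
  """Returns the output of random_patch_gaussian on a toy input."""
  image = 255.0 * tf.ones([480, 640, 3], dtype=tf.float32)
  return random_patch_gaussian(
      image, min_patch_size=32, max_patch_size=250,
      min_gaussian_stddev=0.1, max_gaussian_stddev=1.0)
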
def autoaugment_image(image, boxes, policy_name='v0'):
"""Apply an autoaugment policy to the image and boxes.
See "AutoAugment: Learning Augmentation Policies from Data" by Cubuk et al.,
2018, for further details. https://arxiv.org/abs/1805.09501
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 255].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
policy_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
image: the augmented image.
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form. boxes will have been augmented along with image.
"""
return autoaugment_utils.distort_image_with_autoaugment(
image, boxes, policy_name)
def image_to_float(image):
"""Used in Faster R-CNN. Casts image pixel values to float.
Args:
    image: input image, which might be in tf.uint8 or some other format.
Returns:
image: image in tf.float32 format.
"""
with tf.name_scope('ImageToFloat', values=[image]):
image = tf.cast(image, dtype=tf.float32)
return image
def random_resize_method(image, target_size, preprocess_vars_cache=None):
"""Uses a random resize method to resize the image to target size.
Args:
image: a rank 3 tensor.
target_size: a list of [target_height, target_width]
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
resized image.
"""
resized_image = _apply_with_random_selector(
image,
lambda x, method: tf.image.resize_images(x, target_size, method),
num_cases=4,
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD)
return resized_image
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False,
per_channel_pad_value=(0, 0, 0)):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
per_channel_pad_value: A tuple of per-channel scalar value to use for
padding. By default pads zeros.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
def _resize_landscape_image(image):
# resize a landscape image
return tf.image.resize_images(
image, tf.stack([min_dimension, max_dimension]), method=method,
align_corners=align_corners, preserve_aspect_ratio=True)
def _resize_portrait_image(image):
# resize a portrait image
return tf.image.resize_images(
image, tf.stack([max_dimension, min_dimension]), method=method,
align_corners=align_corners, preserve_aspect_ratio=True)
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
if image.get_shape()[0] < image.get_shape()[1]:
new_image = _resize_landscape_image(image)
else:
new_image = _resize_portrait_image(image)
new_size = tf.constant(new_image.get_shape().as_list())
else:
new_image = tf.cond(
tf.less(tf.shape(image)[0], tf.shape(image)[1]),
lambda: _resize_landscape_image(image),
lambda: _resize_portrait_image(image))
new_size = tf.shape(new_image)
if pad_to_max_dimension:
channels = tf.unstack(new_image, axis=2)
if len(channels) != len(per_channel_pad_value):
raise ValueError('Number of channels must be equal to the length of '
'per-channel pad value.')
new_image = tf.stack(
[
tf.pad( # pylint: disable=g-complex-comprehension
channels[i], [[0, max_dimension - new_size[0]],
[0, max_dimension - new_size[1]]],
constant_values=per_channel_pad_value[i])
for i in range(len(channels))
],
axis=2)
new_image.set_shape([max_dimension, max_dimension, len(channels)])
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
new_masks = tf.squeeze(new_masks, 3)
result.append(new_masks)
result.append(new_size)
return result
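

# Illustrative usage sketch (not part of the original module): resizes a toy
# image so its shorter side becomes 600 pixels unless that would push the
# longer side past 1024, in which case the longer side is set to 1024.
def _example_resize_to_range_usage():
  """Returns the outputs of resize_to_range on a toy input (illustrative)."""
  image = tf.zeros([480, 640, 3], dtype=tf.float32)
  return resize_to_range(image, min_dimension=600, max_dimension=1024)
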
def _get_image_info(image):
"""Returns the height, width and number of channels in the image."""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
num_channels = tf.shape(image)[2]
return (image_height, image_width, num_channels)
# TODO(alirezafathi): Make sure the static shapes are preserved.
def resize_to_min_dimension(image, masks=None, min_dimension=600,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image and masks given the min size maintaining the aspect ratio.
If one of the image dimensions is smaller than min_dimension, it will scale
the image such that its smallest dimension is equal to min_dimension.
Otherwise, will keep the image size as is.
Args:
image: a tensor of size [height, width, channels].
    masks: (optional) a tensor of size [num_instances, height, width].
min_dimension: minimum image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
Returns:
An array containing resized_image, resized_masks, and resized_image_shape.
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]):
(image_height, image_width, num_channels) = _get_image_info(image)
min_image_dimension = tf.minimum(image_height, image_width)
min_target_dimension = tf.maximum(min_image_dimension, min_dimension)
target_ratio = tf.cast(min_target_dimension, dtype=tf.float32) / tf.cast(
min_image_dimension, dtype=tf.float32)
target_height = tf.cast(
tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32)
target_width = tf.cast(
tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32)
image = tf.image.resize_images(
tf.expand_dims(image, axis=0), size=[target_height, target_width],
method=method,
align_corners=True)
result = [tf.squeeze(image, axis=0)]
if masks is not None:
masks = tf.image.resize_nearest_neighbor(
tf.expand_dims(masks, axis=3),
size=[target_height, target_width],
align_corners=True)
result.append(tf.squeeze(masks, axis=3))
result.append(tf.stack([target_height, target_width, num_channels]))
return result
def resize_to_max_dimension(image, masks=None, max_dimension=600,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image and masks given the max size maintaining the aspect ratio.
If one of the image dimensions is greater than max_dimension, it will scale
the image such that its largest dimension is equal to max_dimension.
Otherwise, will keep the image size as is.
Args:
image: a tensor of size [height, width, channels].
    masks: (optional) a tensor of size [num_instances, height, width].
max_dimension: maximum image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
Returns:
An array containing resized_image, resized_masks, and resized_image_shape.
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeGivenMaxDimension', values=[image, max_dimension]):
(image_height, image_width, num_channels) = _get_image_info(image)
max_image_dimension = tf.maximum(image_height, image_width)
max_target_dimension = tf.minimum(max_image_dimension, max_dimension)
target_ratio = tf.cast(max_target_dimension, dtype=tf.float32) / tf.cast(
max_image_dimension, dtype=tf.float32)
target_height = tf.cast(
tf.cast(image_height, dtype=tf.float32) * target_ratio, dtype=tf.int32)
target_width = tf.cast(
tf.cast(image_width, dtype=tf.float32) * target_ratio, dtype=tf.int32)
image = tf.image.resize_images(
tf.expand_dims(image, axis=0), size=[target_height, target_width],
method=method,
align_corners=True)
result = [tf.squeeze(image, axis=0)]
if masks is not None:
masks = tf.image.resize_nearest_neighbor(
tf.expand_dims(masks, axis=3),
size=[target_height, target_width],
align_corners=True)
result.append(tf.squeeze(masks, axis=3))
result.append(tf.stack([target_height, target_width, num_channels]))
return result
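# Illustrative usage sketch (not part of the original module), the mirror case
# of resize_to_min_dimension above:
#
#   resized_image, resized_shape = resize_to_max_dimension(
#       tf.zeros([800, 1200, 3], dtype=tf.float32), max_dimension=600)
#   # resized_shape evaluates to [400, 600, 3] (scale factor 600 / 1200 = 0.5);
#   # an image whose largest side is at most 600 is returned unchanged.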
def resize_pad_to_multiple(image, masks=None, multiple=1):
"""Resize an image by zero padding it to the specified multiple.
For example, with an image of size (101, 199, 3) and multiple=4,
the returned image will have shape (104, 200, 3).
Args:
image: a tensor of shape [height, width, channels]
masks: (optional) a tensor of shape [num_instances, height, width]
multiple: int, the multiple to which the height and width of the input
will be padded.
Returns:
resized_image: The image with 0 padding applied, such that output
dimensions are divisible by `multiple`
resized_masks: If masks are given, they are resized to the same
spatial dimensions as the image.
resized_image_shape: An integer tensor of shape [3] which holds
the shape of the input image.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizePadToMultiple', values=[image, multiple]):
image_height, image_width, num_channels = _get_image_info(image)
image = image[tf.newaxis, :, :, :]
image = ops.pad_to_multiple(image, multiple)[0, :, :, :]
result = [image]
if masks is not None:
masks = tf.transpose(masks, (1, 2, 0))
masks = masks[tf.newaxis, :, :, :]
masks = ops.pad_to_multiple(masks, multiple)[0, :, :, :]
masks = tf.transpose(masks, (2, 0, 1))
result.append(masks)
result.append(tf.stack([image_height, image_width, num_channels]))
return result
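# Illustrative usage sketch (not part of the original module), matching the
# example in the docstring above:
#
#   padded_image, padded_shape = resize_pad_to_multiple(
#       tf.zeros([101, 199, 3], dtype=tf.float32), multiple=4)
#   # padded_image has shape (104, 200, 3) after zero padding, while
#   # padded_shape still holds the original dimensions [101, 199, 3].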
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
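# Illustrative usage sketch (not part of the original module): on a 200x400
# image, the normalized box [0.25, 0.5, 0.75, 1.0] scales to the pixel box
# [50.0, 200.0, 150.0, 400.0]; the image itself passes through unchanged.
#
#   _, pixel_boxes = scale_boxes_to_pixel_coordinates(
#       tf.zeros([200, 400, 3], dtype=tf.float32),
#       tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32))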
# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
def resize_image(image,
masks=None,
new_height=600,
new_width=1024,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False):
"""Resizes images to the given height and width.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
new_height: (optional) (scalar) desired height of the image.
new_width: (optional) (scalar) desired width of the image.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A tensor of size [new_height, new_width, channels].
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width]
resized_image_shape: A 1D tensor of shape [3] containing the shape of the
resized image.
"""
with tf.name_scope(
'ResizeImage',
values=[image, new_height, new_width, method, align_corners]):
new_image = tf.image.resize_images(
image, tf.stack([new_height, new_width]),
method=method,
align_corners=align_corners)
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
result = [new_image]
if masks is not None:
num_instances = tf.shape(masks)[0]
new_size = tf.stack([new_height, new_width])
def resize_masks_branch():
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_nearest_neighbor(
new_masks, new_size, align_corners=align_corners)
new_masks = tf.squeeze(new_masks, axis=3)
return new_masks
def reshape_masks_branch():
# The shape function will be computed for both branches of the
# condition, regardless of which branch is actually taken. Make sure
# that we don't trigger an assertion in the shape function when trying
# to reshape a non empty tensor into an empty one.
new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]])
return new_masks
masks = tf.cond(num_instances > 0, resize_masks_branch,
reshape_masks_branch)
result.append(masks)
result.append(tf.stack([new_height, new_width, image_shape[2]]))
return result
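# Illustrative usage sketch (not part of the original module): unlike the
# aspect-ratio preserving resizers above, resize_image stretches to an exact
# size; masks, when given, are resized with nearest-neighbor interpolation.
#
#   resized, resized_masks, shape = resize_image(
#       tf.zeros([480, 640, 3], dtype=tf.float32),
#       masks=tf.zeros([2, 480, 640], dtype=tf.float32),
#       new_height=300, new_width=300)
#   # resized has shape (300, 300, 3), resized_masks has shape (2, 300, 300)
#   # and shape evaluates to [300, 300, 3].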
def subtract_channel_mean(image, means=None):
"""Normalizes an image by subtracting a mean from each channel.
Args:
image: A 3D tensor of shape [height, width, channels]
means: float list containing a mean for each channel
Returns:
normalized_images: a tensor of shape [height, width, channels]
Raises:
    ValueError: if image is not a 3D tensor or if the number of means is not
equal to the number of channels.
"""
with tf.name_scope('SubtractChannelMean', values=[image, means]):
if len(image.get_shape()) != 3:
raise ValueError('Input must be of size [height, width, channels]')
if len(means) != image.get_shape()[-1]:
raise ValueError('len(means) must match the number of channels')
return image - [[means]]
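# Illustrative sketch (not part of the original module): means are broadcast
# as a [1, 1, channels] tensor, so each channel is shifted independently. With
# a constant 128-valued image and example means [123.68, 116.779, 103.939],
# the channels become 4.32, 11.221 and 24.061 respectively.
#
#   normalized = subtract_channel_mean(
#       tf.ones([4, 4, 3]) * 128.0, means=[123.68, 116.779, 103.939])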
def one_hot_encoding(labels, num_classes=None):
"""One-hot encodes the multiclass labels.
Example usage:
labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
one_hot.eval() # evaluates to [0, 1, 0, 0, 1]
Args:
labels: A tensor of shape [None] corresponding to the labels.
num_classes: Number of classes in the dataset.
Returns:
onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
encoding of the labels.
Raises:
ValueError: if num_classes is not specified.
"""
with tf.name_scope('OneHotEncoding', values=[labels]):
if num_classes is None:
raise ValueError('num_classes must be specified')
labels = tf.one_hot(labels, num_classes, 1, 0)
return tf.reduce_max(labels, 0)
def rgb_to_gray(image):
"""Converts a 3 channel RGB image to a 1 channel grayscale image.
Args:
image: Rank 3 float32 tensor containing 1 image -> [height, width, 3]
with pixel values varying between [0, 1].
Returns:
    image: A single channel grayscale image -> [height, width, 1].
"""
return _rgb_to_grayscale(image)
def random_self_concat_image(
image, boxes, labels, label_weights, label_confidences=None,
multiclass_scores=None, concat_vertical_probability=0.1,
concat_horizontal_probability=0.1, seed=None,
preprocess_vars_cache=None):
"""Randomly concatenates the image with itself.
This function randomly concatenates the image with itself; the random
variables for vertical and horizontal concatenation are independent.
Afterwards, we adjust the old bounding boxes, and add new bounding boxes
for the new objects.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 containing the label weights.
label_confidences: (optional) rank 1 float32 containing the label
confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for
each box for each class.
concat_vertical_probability: (optional) a tf.float32 scalar denoting the
probability of a vertical concatenation.
concat_horizontal_probability: (optional) a tf.float32 scalar denoting the
probability of a horizontal concatenation.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
if label_confidences is not None also returns:
      maybe_concat_label_confidences: concatenated label confidences.
if multiclass_scores is not None also returns:
      maybe_concat_multiclass_scores: concatenated multiclass scores.
"""
concat_vertical = (tf.random_uniform([], seed=seed) <
concat_vertical_probability)
# Note the seed + 1 so we get some semblance of independence even with
# fixed seeds.
concat_horizontal = (tf.random_uniform([], seed=seed + 1 if seed else None)
< concat_horizontal_probability)
gen_func = lambda: (concat_vertical, concat_horizontal)
params = _get_or_create_preprocess_rand_vars(
gen_func, preprocessor_cache.PreprocessorCache.SELF_CONCAT_IMAGE,
preprocess_vars_cache)
concat_vertical, concat_horizontal = params
def _concat_image(image, boxes, labels, label_weights, axis):
"""Concats the image to itself on `axis`."""
output_images = tf.concat([image, image], axis=axis)
if axis == 0:
# Concat vertically, so need to reduce the y coordinates.
old_scaling = tf.constant([0.5, 1.0, 0.5, 1.0])
new_translation = tf.constant([0.5, 0.0, 0.5, 0.0])
elif axis == 1:
old_scaling = tf.constant([1.0, 0.5, 1.0, 0.5])
new_translation = tf.constant([0.0, 0.5, 0.0, 0.5])
old_boxes = old_scaling * boxes
new_boxes = old_boxes + new_translation
all_boxes = tf.concat([old_boxes, new_boxes], axis=0)
return [output_images, all_boxes, tf.tile(labels, [2]), tf.tile(
label_weights, [2])]
image, boxes, labels, label_weights = tf.cond(
concat_vertical,
lambda: _concat_image(image, boxes, labels, label_weights, axis=0),
lambda: [image, boxes, labels, label_weights],
strict=True)
outputs = tf.cond(
concat_horizontal,
lambda: _concat_image(image, boxes, labels, label_weights, axis=1),
lambda: [image, boxes, labels, label_weights],
strict=True)
if label_confidences is not None:
label_confidences = tf.cond(concat_vertical,
lambda: tf.tile(label_confidences, [2]),
lambda: label_confidences)
outputs.append(tf.cond(concat_horizontal,
lambda: tf.tile(label_confidences, [2]),
lambda: label_confidences))
if multiclass_scores is not None:
multiclass_scores = tf.cond(concat_vertical,
lambda: tf.tile(multiclass_scores, [2, 1]),
lambda: multiclass_scores)
outputs.append(tf.cond(concat_horizontal,
lambda: tf.tile(multiclass_scores, [2, 1]),
lambda: multiclass_scores))
return outputs
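# Illustrative sketch of the box arithmetic above (not part of the original
# module): for a vertical concatenation (axis=0), y coordinates are halved and
# a translated copy is appended, so the box [0.2, 0.1, 0.6, 0.5] becomes
# [0.1, 0.1, 0.3, 0.5] for the top copy and [0.6, 0.1, 0.8, 0.5] for the
# bottom copy; labels and weights are tiled to match.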
def ssd_random_crop(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 7,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: rank 1 float32 tensor containing the weights.
label_confidences: rank 1 float32 tensor containing the confidences.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
    If label_weights, label_confidences, multiclass_scores, masks, or keypoints
    is not None, the function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    label_confidences: rank 1 float32 tensor with shape [num_instances].
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
def random_crop_selector(selected_result, index):
"""Applies random_crop_image to selected result.
Args:
selected_result: A tuple containing image, boxes, labels, keypoints (if
not None), and masks (if not None).
index: The index that was randomly selected.
Returns: A tuple containing image, boxes, labels, keypoints (if not None),
and masks (if not None).
"""
i = 3
image, boxes, labels = selected_result[:i]
selected_label_weights = None
selected_label_confidences = None
selected_multiclass_scores = None
selected_masks = None
selected_keypoints = None
if label_weights is not None:
selected_label_weights = selected_result[i]
i += 1
if label_confidences is not None:
selected_label_confidences = selected_result[i]
i += 1
if multiclass_scores is not None:
selected_multiclass_scores = selected_result[i]
i += 1
if masks is not None:
selected_masks = selected_result[i]
i += 1
if keypoints is not None:
selected_keypoints = selected_result[i]
return random_crop_image(
image=image,
boxes=boxes,
labels=labels,
label_weights=selected_label_weights,
label_confidences=selected_label_confidences,
multiclass_scores=selected_multiclass_scores,
masks=selected_masks,
keypoints=selected_keypoints,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
clip_boxes=clip_boxes[index],
random_coef=random_coef[index],
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
result = _apply_with_random_selector_tuples(
tuple(
t for t in (image, boxes, labels, label_weights, label_confidences,
multiclass_scores, masks, keypoints) if t is not None),
random_crop_selector,
num_cases=len(min_object_covered),
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID)
return result
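# Illustrative usage sketch (not part of the original module): every tuple
# argument provides one value per random selector case, so all tuples must be
# at least as long as min_object_covered. The values below are hypothetical.
#
#   cropped_image, cropped_boxes, cropped_labels, cropped_weights = (
#       ssd_random_crop(image, boxes, labels, label_weights,
#                       min_object_covered=(0.0, 0.5, 1.0),
#                       aspect_ratio_range=((0.5, 2.0),) * 3,
#                       area_range=((0.1, 1.0),) * 3,
#                       overlap_thresh=(0.0, 0.5, 1.0),
#                       clip_boxes=(True,) * 3,
#                       random_coef=(0.15,) * 3))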
def ssd_random_crop_pad(image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio_range=((0.5, 2.0),) * 6,
area_range=((0.1, 1.0),) * 6,
overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 6,
random_coef=(0.15,) * 6,
min_padded_size_ratio=((1.0, 1.0),) * 6,
max_padded_size_ratio=((2.0, 2.0),) * 6,
pad_color=(None,) * 6,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: float32 tensor of shape [num_instances] representing the
confidences for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the randomly
cropped image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
new_labels: new labels.
new_label_weights: new label weights.
"""
def random_crop_pad_selector(image_boxes_labels, index):
"""Random crop preprocessing helper."""
i = 3
image, boxes, labels = image_boxes_labels[:i]
selected_label_weights = None
selected_label_confidences = None
selected_multiclass_scores = None
if label_weights is not None:
selected_label_weights = image_boxes_labels[i]
i += 1
if label_confidences is not None:
selected_label_confidences = image_boxes_labels[i]
i += 1
if multiclass_scores is not None:
selected_multiclass_scores = image_boxes_labels[i]
return random_crop_pad_image(
image,
boxes,
labels,
label_weights=selected_label_weights,
label_confidences=selected_label_confidences,
multiclass_scores=selected_multiclass_scores,
min_object_covered=min_object_covered[index],
aspect_ratio_range=aspect_ratio_range[index],
area_range=area_range[index],
overlap_thresh=overlap_thresh[index],
clip_boxes=clip_boxes[index],
random_coef=random_coef[index],
min_padded_size_ratio=min_padded_size_ratio[index],
max_padded_size_ratio=max_padded_size_ratio[index],
pad_color=pad_color[index],
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
return _apply_with_random_selector_tuples(
tuple(t for t in (image, boxes, labels, label_weights, label_confidences,
multiclass_scores) if t is not None),
random_crop_pad_selector,
num_cases=len(min_object_covered),
preprocess_vars_cache=preprocess_vars_cache,
key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID)
def ssd_random_crop_fixed_aspect_ratio(
image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio=1.0,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
seed=None,
preprocess_vars_cache=None):
"""Random crop preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
The only difference is that the aspect ratio of the crops are fixed.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidences for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio: aspect ratio of the cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If multiclass_scores, masks, or keypoints is not None, the function also
returns:
multiclass_scores: rank 2 float32 tensor with shape
[num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range)
crop_result = ssd_random_crop(
image,
boxes,
labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
keypoints=keypoints,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
i = 3
new_image, new_boxes, new_labels = crop_result[:i]
new_label_weights = None
new_label_confidences = None
new_multiclass_scores = None
new_masks = None
new_keypoints = None
if label_weights is not None:
new_label_weights = crop_result[i]
i += 1
if label_confidences is not None:
new_label_confidences = crop_result[i]
i += 1
if multiclass_scores is not None:
new_multiclass_scores = crop_result[i]
i += 1
if masks is not None:
new_masks = crop_result[i]
i += 1
if keypoints is not None:
new_keypoints = crop_result[i]
result = random_crop_to_aspect_ratio(
new_image,
new_boxes,
new_labels,
label_weights=new_label_weights,
label_confidences=new_label_confidences,
multiclass_scores=new_multiclass_scores,
masks=new_masks,
keypoints=new_keypoints,
aspect_ratio=aspect_ratio,
clip_boxes=clip_boxes,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
return result
def ssd_random_crop_pad_fixed_aspect_ratio(
image,
boxes,
labels,
label_weights,
label_confidences=None,
multiclass_scores=None,
masks=None,
keypoints=None,
min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
aspect_ratio=1.0,
aspect_ratio_range=((0.5, 2.0),) * 7,
area_range=((0.1, 1.0),) * 7,
overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
clip_boxes=(True,) * 7,
random_coef=(0.15,) * 7,
min_padded_size_ratio=(1.0, 1.0),
max_padded_size_ratio=(2.0, 2.0),
seed=None,
preprocess_vars_cache=None):
"""Random crop and pad preprocessing with default parameters as in SSD paper.
Liu et al., SSD: Single shot multibox detector.
For further information on random crop preprocessing refer to RandomCrop
function above.
The only difference is that after the initial crop, images are zero-padded
to a fixed aspect ratio instead of being resized to that aspect ratio.
Args:
image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
multiclass_scores: (optional) float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio: the final aspect ratio to pad to.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
clip_boxes: whether to clip the boxes to the cropped image.
random_coef: a random coefficient that defines the chance of getting the
original image. If random_coef is 0, we will always get the
cropped image, and if it is 1.0, we will always get the
original image.
min_padded_size_ratio: min ratio of padded image height and width to the
input image's height and width.
max_padded_size_ratio: max ratio of padded image height and width to the
input image's height and width.
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If multiclass_scores, masks, or keypoints is not None, the function also
returns:
multiclass_scores: rank 2 with shape [num_instances, num_classes]
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
"""
crop_result = ssd_random_crop(
image,
boxes,
labels,
label_weights=label_weights,
label_confidences=label_confidences,
multiclass_scores=multiclass_scores,
masks=masks,
keypoints=keypoints,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
overlap_thresh=overlap_thresh,
clip_boxes=clip_boxes,
random_coef=random_coef,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
i = 3
new_image, new_boxes, new_labels = crop_result[:i]
new_label_weights = None
new_label_confidences = None
new_multiclass_scores = None
new_masks = None
new_keypoints = None
if label_weights is not None:
new_label_weights = crop_result[i]
i += 1
if label_confidences is not None:
new_label_confidences = crop_result[i]
i += 1
if multiclass_scores is not None:
new_multiclass_scores = crop_result[i]
i += 1
if masks is not None:
new_masks = crop_result[i]
i += 1
if keypoints is not None:
new_keypoints = crop_result[i]
result = random_pad_to_aspect_ratio(
new_image,
new_boxes,
masks=new_masks,
keypoints=new_keypoints,
aspect_ratio=aspect_ratio,
min_padded_size_ratio=min_padded_size_ratio,
max_padded_size_ratio=max_padded_size_ratio,
seed=seed,
preprocess_vars_cache=preprocess_vars_cache)
result = list(result)
i = 3
result.insert(2, new_labels)
if new_label_weights is not None:
result.insert(i, new_label_weights)
i += 1
if new_label_confidences is not None:
result.insert(i, new_label_confidences)
i += 1
if multiclass_scores is not None:
result.insert(i, new_multiclass_scores)
result = tuple(result)
return result
def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0):
"""Converts multiclass logits to softmax scores after applying temperature.
Args:
multiclass_scores: float32 tensor of shape
[num_instances, num_classes] representing the score for each box for each
class.
temperature: Scale factor to use prior to applying softmax. Larger
      temperatures give more uniform distributions after softmax.
Returns:
multiclass_scores: float32 tensor of shape
[num_instances, num_classes] with scaling and softmax applied.
"""
# Multiclass scores must be stored as logits. Apply temp and softmax.
multiclass_scores_scaled = tf.multiply(
multiclass_scores, 1.0 / temperature, name='scale_logits')
multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax')
return multiclass_scores
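# Illustrative sketch (not part of the original module): with logits
# [2.0, 0.0], temperature 1.0 yields softmax scores of roughly [0.88, 0.12],
# while temperature 4.0 first scales the logits to [0.5, 0.0] and flattens
# the scores to roughly [0.62, 0.38].
#
#   scores = convert_class_logits_to_softmax(
#       tf.constant([[2.0, 0.0]]), temperature=4.0)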
def _get_crop_border(border, size):
border = tf.cast(border, tf.float32)
size = tf.cast(size, tf.float32)
i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0))
divisor = tf.pow(2.0, i)
divisor = tf.clip_by_value(divisor, 1, border)
divisor = tf.cast(divisor, tf.int32)
return tf.cast(border, tf.int32) // divisor
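# Illustrative sketch of the border computation above (not part of the
# original module): with border=128 and an image dimension of 100,
# i = ceil(log2(2 * 128 / 100)) = 2, the divisor is 2**2 = 4 and the returned
# border is 128 // 4 = 32, which stays below half the image dimension (50).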
def random_square_crop_by_scale(image, boxes, labels, label_weights,
label_confidences=None, masks=None,
keypoints=None, max_border=128, scale_min=0.6,
scale_max=1.3, num_scales=8, seed=None,
preprocess_vars_cache=None):
"""Randomly crop a square in proportion to scale and image size.
Extract a square sized crop from an image whose side length is sampled by
randomly scaling the maximum spatial dimension of the image. If part of
the crop falls outside the image, it is filled with zeros.
The augmentation is borrowed from [1]
[1]: https://arxiv.org/abs/1904.07850
Args:
image: rank 3 float32 tensor containing 1 image ->
[height, width, channels].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax].
Boxes on the crop boundary are clipped to the boundary and boxes
falling outside the crop are ignored.
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
max_border: The maximum size of the border. The border defines distance in
pixels to the image boundaries that will not be considered as a center of
a crop. To make sure that the border does not go over the center of the
image, we chose the border value by computing the minimum k, such that
(max_border / (2**k)) < image_dimension/2.
scale_min: float, the minimum value for scale.
scale_max: float, the maximum value for scale.
num_scales: int, the number of discrete scale values to sample between
[scale_min, scale_max]
seed: random seed.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
label_weights: rank 1 float32 tensor with shape [num_instances].
label_confidences: (optional) float32 tensor of shape [num_instances]
representing the confidence for each box.
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
"""
img_shape = tf.shape(image)
height, width = img_shape[0], img_shape[1]
scales = tf.linspace(scale_min, scale_max, num_scales)
scale = _get_or_create_preprocess_rand_vars(
lambda: scales[_random_integer(0, num_scales, seed)],
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'scale')
image_size = scale * tf.cast(tf.maximum(height, width), tf.float32)
image_size = tf.cast(image_size, tf.int32)
h_border = _get_crop_border(max_border, height)
w_border = _get_crop_border(max_border, width)
def y_function():
y = _random_integer(h_border,
tf.cast(height, tf.int32) - h_border + 1,
seed)
return y
def x_function():
x = _random_integer(w_border,
tf.cast(width, tf.int32) - w_border + 1,
seed)
return x
y_center = _get_or_create_preprocess_rand_vars(
y_function,
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'y_center')
x_center = _get_or_create_preprocess_rand_vars(
x_function,
preprocessor_cache.PreprocessorCache.SQUARE_CROP_BY_SCALE,
preprocess_vars_cache, 'x_center')
half_size = tf.cast(image_size / 2, tf.int32)
crop_ymin, crop_ymax = y_center - half_size, y_center + half_size
crop_xmin, crop_xmax = x_center - half_size, x_center + half_size
ymin = tf.maximum(crop_ymin, 0)
xmin = tf.maximum(crop_xmin, 0)
ymax = tf.minimum(crop_ymax, height - 1)
xmax = tf.minimum(crop_xmax, width - 1)
cropped_image = image[ymin:ymax, xmin:xmax]
offset_y = tf.maximum(0, ymin - crop_ymin)
offset_x = tf.maximum(0, xmin - crop_xmin)
oy_i = offset_y
ox_i = offset_x
output_image = tf.image.pad_to_bounding_box(
cropped_image, offset_height=oy_i, offset_width=ox_i,
target_height=image_size, target_width=image_size)
if ymin == 0:
# We might be padding the image.
box_ymin = -offset_y
else:
box_ymin = crop_ymin
if xmin == 0:
# We might be padding the image.
box_xmin = -offset_x
else:
box_xmin = crop_xmin
box_ymax = box_ymin + image_size
box_xmax = box_xmin + image_size
image_box = [box_ymin / height, box_xmin / width,
box_ymax / height, box_xmax / width]
boxlist = box_list.BoxList(boxes)
boxlist = box_list_ops.change_coordinate_frame(boxlist, image_box)
boxlist, indices = box_list_ops.prune_completely_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
boxlist = box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0],
filter_nonoverlapping=False)
return_values = [output_image, boxlist.get(),
tf.gather(labels, indices),
tf.gather(label_weights, indices)]
if label_confidences is not None:
return_values.append(tf.gather(label_confidences, indices))
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = new_masks[:, ymin:ymax, xmin:xmax]
new_masks = tf.image.pad_to_bounding_box(
new_masks, oy_i, ox_i, image_size, image_size)
new_masks = tf.squeeze(new_masks, [-1])
return_values.append(tf.gather(new_masks, indices))
if keypoints is not None:
keypoints = tf.gather(keypoints, indices)
keypoints = keypoint_ops.change_coordinate_frame(keypoints, image_box)
keypoints = keypoint_ops.prune_outside_window(keypoints,
[0.0, 0.0, 1.0, 1.0])
return_values.append(keypoints)
return return_values
def random_scale_crop_and_pad_to_square(
image,
boxes,
labels,
label_weights,
masks=None,
keypoints=None,
label_confidences=None,
scale_min=0.1,
scale_max=2.0,
output_size=512,
resize_method=tf.image.ResizeMethod.BILINEAR,
seed=None):
"""Randomly scale, crop, and then pad an image to fixed square dimensions.
Randomly scale, crop, and then pad an image to the desired square output
dimensions. Specifically, this method first samples a random_scale factor
from a uniform distribution between scale_min and scale_max, and then resizes
  the image such that its maximum dimension is (output_size * random_scale).
Secondly, a square output_size crop is extracted from the resized image
(note, this will only occur when random_scale > 1.0). Lastly, the cropped
region is padded to the desired square output_size, by filling with zeros.
The augmentation is borrowed from [1]
[1]: https://arxiv.org/abs/1911.09070
Args:
image: rank 3 float32 tensor containing 1 image ->
[height, width, channels].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes
are in normalized form meaning their coordinates vary between [0, 1]. Each
row is in the form of [ymin, xmin, ymax, xmax]. Boxes on the crop boundary
are clipped to the boundary and boxes falling outside the crop are
ignored.
labels: rank 1 int32 tensor containing the object classes.
label_weights: float32 tensor of shape [num_instances] representing the
weight for each box.
masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
width] containing instance masks. The masks are of the same height, width
as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape [num_instances,
num_keypoints, 2]. The keypoints are in y-x normalized coordinates.
label_confidences: (optional) float32 tensor of shape [num_instance]
representing the confidence for each box.
scale_min: float, the minimum value for the random scale factor.
scale_max: float, the maximum value for the random scale factor.
output_size: int, the desired (square) output image size.
resize_method: tf.image.ResizeMethod, resize method to use when scaling the
input images.
seed: random seed.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
label_weights: rank 1 float32 tensor with shape [num_instances].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
label_confidences: confidences for retained boxes.
"""
img_shape = tf.shape(image)
input_height, input_width = img_shape[0], img_shape[1]
random_scale = tf.random_uniform([], scale_min, scale_max, seed=seed)
# Compute the scaled height and width from the random scale.
max_input_dim = tf.cast(tf.maximum(input_height, input_width), tf.float32)
input_ar_y = tf.cast(input_height, tf.float32) / max_input_dim
input_ar_x = tf.cast(input_width, tf.float32) / max_input_dim
scaled_height = tf.cast(random_scale * output_size * input_ar_y, tf.int32)
scaled_width = tf.cast(random_scale * output_size * input_ar_x, tf.int32)
# Compute the offsets:
offset_y = tf.cast(scaled_height - output_size, tf.float32)
offset_x = tf.cast(scaled_width - output_size, tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random_uniform([], 0, 1, seed=seed)
offset_x = tf.maximum(0.0, offset_x) * tf.random_uniform([], 0, 1, seed=seed)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
# Scale, crop, and pad the input image.
scaled_image = tf.image.resize_images(
image, [scaled_height, scaled_width], method=resize_method)
scaled_image = scaled_image[offset_y:offset_y + output_size,
offset_x:offset_x + output_size, :]
output_image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, output_size,
output_size)
# Update the boxes.
new_window = tf.cast(
tf.stack([offset_y, offset_x,
offset_y + output_size, offset_x + output_size]),
dtype=tf.float32)
new_window /= tf.cast(
tf.stack([scaled_height, scaled_width, scaled_height, scaled_width]),
dtype=tf.float32)
boxlist = box_list.BoxList(boxes)
boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
boxlist, indices = box_list_ops.prune_completely_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
boxlist = box_list_ops.clip_to_window(
boxlist, [0.0, 0.0, 1.0, 1.0], filter_nonoverlapping=False)
return_values = [output_image, boxlist.get(),
tf.gather(labels, indices),
tf.gather(label_weights, indices)]
if masks is not None:
new_masks = tf.expand_dims(masks, -1)
new_masks = tf.image.resize_images(
new_masks, [scaled_height, scaled_width], method=resize_method)
new_masks = new_masks[:, offset_y:offset_y + output_size,
offset_x:offset_x + output_size, :]
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, output_size, output_size)
new_masks = tf.squeeze(new_masks, [-1])
return_values.append(tf.gather(new_masks, indices))
if keypoints is not None:
keypoints = tf.gather(keypoints, indices)
keypoints = keypoint_ops.change_coordinate_frame(keypoints, new_window)
keypoints = keypoint_ops.prune_outside_window(
keypoints, [0.0, 0.0, 1.0, 1.0])
return_values.append(keypoints)
if label_confidences is not None:
return_values.append(tf.gather(label_confidences, indices))
return return_values
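# Illustrative sketch of the scaling arithmetic above (not part of the
# original module): with a 400x300 input, output_size=512 and a sampled
# random_scale of 1.5, the maximum input dimension is 400, so
# scaled_height = 1.5 * 512 * (400 / 400) = 768 and
# scaled_width = 1.5 * 512 * (300 / 400) = 576; a random 512x512 window is
# then cropped from the resized image. For random_scale < 1.0 the resized
# image is smaller than output_size on both sides and is zero padded instead.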
def get_default_func_arg_map(include_label_weights=True,
include_label_confidences=False,
include_multiclass_scores=False,
include_instance_masks=False,
include_instance_mask_weights=False,
include_keypoints=False,
include_keypoint_visibilities=False,
include_dense_pose=False,
include_keypoint_depths=False):
"""Returns the default mapping from a preprocessor function to its args.
Args:
include_label_weights: If True, preprocessing functions will modify the
label weights, too.
include_label_confidences: If True, preprocessing functions will modify the
label confidences, too.
include_multiclass_scores: If True, preprocessing functions will modify the
multiclass scores, too.
include_instance_masks: If True, preprocessing functions will modify the
instance masks, too.
include_instance_mask_weights: If True, preprocessing functions will modify
the instance mask weights.
include_keypoints: If True, preprocessing functions will modify the
keypoints, too.
include_keypoint_visibilities: If True, preprocessing functions will modify
the keypoint visibilities, too.
include_dense_pose: If True, preprocessing functions will modify the
DensePose labels, too.
include_keypoint_depths: If True, preprocessing functions will modify the
keypoint depth labels, too.
Returns:
A map from preprocessing functions to the arguments they receive.
"""
groundtruth_label_weights = None
if include_label_weights:
groundtruth_label_weights = (
fields.InputDataFields.groundtruth_weights)
groundtruth_label_confidences = None
if include_label_confidences:
groundtruth_label_confidences = (
fields.InputDataFields.groundtruth_confidences)
multiclass_scores = None
if include_multiclass_scores:
multiclass_scores = (fields.InputDataFields.multiclass_scores)
groundtruth_instance_masks = None
if include_instance_masks:
groundtruth_instance_masks = (
fields.InputDataFields.groundtruth_instance_masks)
groundtruth_instance_mask_weights = None
if include_instance_mask_weights:
groundtruth_instance_mask_weights = (
fields.InputDataFields.groundtruth_instance_mask_weights)
groundtruth_keypoints = None
if include_keypoints:
groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints
groundtruth_keypoint_visibilities = None
if include_keypoint_visibilities:
groundtruth_keypoint_visibilities = (
fields.InputDataFields.groundtruth_keypoint_visibilities)
groundtruth_dp_num_points = None
groundtruth_dp_part_ids = None
groundtruth_dp_surface_coords = None
if include_dense_pose:
groundtruth_dp_num_points = (
fields.InputDataFields.groundtruth_dp_num_points)
groundtruth_dp_part_ids = (
fields.InputDataFields.groundtruth_dp_part_ids)
groundtruth_dp_surface_coords = (
fields.InputDataFields.groundtruth_dp_surface_coords)
groundtruth_keypoint_depths = None
groundtruth_keypoint_depth_weights = None
if include_keypoint_depths:
groundtruth_keypoint_depths = (
fields.InputDataFields.groundtruth_keypoint_depths)
groundtruth_keypoint_depth_weights = (
fields.InputDataFields.groundtruth_keypoint_depth_weights)
prep_func_arg_map = {
normalize_image: (fields.InputDataFields.image,),
random_horizontal_flip: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
groundtruth_keypoint_visibilities,
groundtruth_dp_part_ids,
groundtruth_dp_surface_coords,
groundtruth_keypoint_depths,
groundtruth_keypoint_depth_weights,
),
random_vertical_flip: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_rotation90: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_pixel_value_scale: (fields.InputDataFields.image,),
random_image_scale: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
random_rgb_to_gray: (fields.InputDataFields.image,),
random_adjust_brightness: (fields.InputDataFields.image,),
random_adjust_contrast: (fields.InputDataFields.image,),
random_adjust_hue: (fields.InputDataFields.image,),
random_adjust_saturation: (fields.InputDataFields.image,),
random_distort_color: (fields.InputDataFields.image,),
random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,),
random_crop_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores, groundtruth_instance_masks,
groundtruth_instance_mask_weights, groundtruth_keypoints,
groundtruth_keypoint_visibilities, groundtruth_dp_num_points,
groundtruth_dp_part_ids, groundtruth_dp_surface_coords),
random_pad_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_dp_surface_coords),
random_absolute_pad_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_dp_surface_coords),
random_crop_pad_image: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores),
random_crop_to_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_pad_to_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_instance_masks,
groundtruth_keypoints,
),
random_black_patches: (fields.InputDataFields.image,),
random_jpeg_quality: (fields.InputDataFields.image,),
random_downscale_to_target_pixels: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
random_patch_gaussian: (fields.InputDataFields.image,),
autoaugment_image: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
),
retain_boxes_above_threshold: (
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
drop_label_probabilistically: (
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
remap_labels: (fields.InputDataFields.groundtruth_classes,),
image_to_float: (fields.InputDataFields.image,),
random_resize_method: (fields.InputDataFields.image,),
resize_to_range: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
resize_to_min_dimension: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
scale_boxes_to_pixel_coordinates: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
groundtruth_keypoints,
),
resize_image: (
fields.InputDataFields.image,
groundtruth_instance_masks,
),
subtract_channel_mean: (fields.InputDataFields.image,),
one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,),
rgb_to_gray: (fields.InputDataFields.image,),
random_self_concat_image:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores),
ssd_random_crop: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores,
groundtruth_instance_masks, groundtruth_keypoints),
ssd_random_crop_pad: (fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences, multiclass_scores),
ssd_random_crop_fixed_aspect_ratio:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints
),
ssd_random_crop_pad_fixed_aspect_ratio: (
fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights,
groundtruth_label_confidences,
multiclass_scores,
groundtruth_instance_masks,
groundtruth_keypoints,
),
convert_class_logits_to_softmax: (multiclass_scores,),
random_square_crop_by_scale:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_label_confidences,
groundtruth_instance_masks, groundtruth_keypoints),
random_scale_crop_and_pad_to_square:
(fields.InputDataFields.image,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
groundtruth_label_weights, groundtruth_instance_masks,
groundtruth_keypoints, groundtruth_label_confidences),
adjust_gamma: (fields.InputDataFields.image,),
}
return prep_func_arg_map
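# Illustrative usage sketch (not part of the original module): the include_*
# flags switch the corresponding optional field names from None to the
# fields.InputDataFields constants, which lets preprocess() below thread those
# tensors through each augmentation.
#
#   func_arg_map = get_default_func_arg_map(include_instance_masks=True,
#                                           include_keypoints=True)
#   # func_arg_map[random_horizontal_flip] now names the image, boxes, mask
#   # and keypoint keys; the remaining optional entries stay None.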
def preprocess(tensor_dict,
preprocess_options,
func_arg_map=None,
preprocess_vars_cache=None):
"""Preprocess images and bounding boxes.
Various types of preprocessing (to be implemented) based on the
preprocess_options dictionary e.g. "crop image" (affects image and possibly
  boxes), "white balance image" (affects only image), etc. If
  preprocess_options is empty, no preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor contains
1 image -> [1, height, width, 3].
with pixel values varying between [0, 1]
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
preprocess_vars_cache: PreprocessorCache object that records previously
performed augmentations. Updated in-place. If this
function is called multiple times with the same
non-null cache, it will perform deterministically.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
images = tensor_dict[fields.InputDataFields.image]
if len(images.get_shape()) != 4:
raise ValueError('images in tensor_dict should be rank 4')
image = tf.squeeze(images, axis=0)
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
if preprocess_vars_cache is not None:
if six.PY2:
# pylint: disable=deprecated-method
arg_spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
else:
arg_spec = inspect.getfullargspec(func)
if 'preprocess_vars_cache' in arg_spec.args:
params['preprocess_vars_cache'] = preprocess_vars_cache
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
  # changes the image to images (rank 3 to rank 4) to be compatible with what
# we received in the first place
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
images = tf.expand_dims(image, 0)
tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
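# Illustrative usage sketch (not part of the original module); the chosen
# options and values are hypothetical.
#
#   preprocess_options = [
#       (random_horizontal_flip, {}),
#       (random_adjust_brightness, {'max_delta': 0.2}),
#       (resize_image, {'new_height': 300, 'new_width': 300}),
#   ]
#   tensor_dict = {
#       fields.InputDataFields.image:
#           tf.zeros([1, 480, 640, 3], dtype=tf.float32),
#       fields.InputDataFields.groundtruth_boxes:
#           tf.constant([[0.1, 0.1, 0.9, 0.9]], dtype=tf.float32),
#   }
#   tensor_dict = preprocess(tensor_dict, preprocess_options)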
| 201,098 | 41.16796 | 94 | py |
models | models-master/research/object_detection/core/model_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import model
from object_detection.utils import test_case
class FakeModel(model.DetectionModel):
def __init__(self):
# sub-networks containing weights of different shapes.
self._network1 = tf.keras.Sequential([
tf.keras.layers.Conv2D(8, 1)
])
self._network2 = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, 1)
])
super(FakeModel, self).__init__(num_classes=0)
def preprocess(self, images):
return images, tf.shape(images)
def predict(self, images, shapes):
return {'prediction': self._network2(self._network1(images))}
def postprocess(self, prediction_dict, shapes):
return prediction_dict
def loss(self):
return tf.constant(0.0)
def updates(self):
return []
def restore_map(self):
return {}
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def regularization_losses(self):
return []
class ModelTest(test_case.TestCase):
def test_model_call(self):
detection_model = FakeModel()
def graph_fn():
return detection_model(tf.zeros((1, 128, 128, 3)))
result = self.execute(graph_fn, [])
self.assertEqual(result['prediction'].shape,
(1, 128, 128, 16))
def test_freeze(self):
detection_model = FakeModel()
detection_model(tf.zeros((1, 128, 128, 3)))
net1_var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
del detection_model
detection_model = FakeModel()
detection_model._network2.trainable = False
detection_model(tf.zeros((1, 128, 128, 3)))
var_shapes = [tuple(var.get_shape().as_list()) for var in
detection_model._network1.trainable_variables]
self.assertEqual(set(net1_var_shapes), set(var_shapes))
if __name__ == '__main__':
tf.test.main()
| 2,744 | 25.911765 | 80 | py |
models | models-master/research/object_detection/core/preprocessor_cache.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Records previous preprocessing operations and allows them to be repeated.
Used with object_detection.core.preprocessor. Passing a PreprocessorCache
into individual data augmentation functions or the general preprocess() function
will store all randomly generated variables in the PreprocessorCache. When
a preprocessor function is called multiple times with the same
PreprocessorCache object, that function will perform the same augmentation
on all calls.
"""
import collections
class PreprocessorCache(object):
"""Dictionary wrapper storing random variables generated during preprocessing.
"""
# Constant keys representing different preprocessing functions
ROTATION90 = 'rotation90'
HORIZONTAL_FLIP = 'horizontal_flip'
VERTICAL_FLIP = 'vertical_flip'
PIXEL_VALUE_SCALE = 'pixel_value_scale'
IMAGE_SCALE = 'image_scale'
RGB_TO_GRAY = 'rgb_to_gray'
ADJUST_BRIGHTNESS = 'adjust_brightness'
ADJUST_CONTRAST = 'adjust_contrast'
ADJUST_HUE = 'adjust_hue'
ADJUST_SATURATION = 'adjust_saturation'
DISTORT_COLOR = 'distort_color'
STRICT_CROP_IMAGE = 'strict_crop_image'
CROP_IMAGE = 'crop_image'
PAD_IMAGE = 'pad_image'
CROP_TO_ASPECT_RATIO = 'crop_to_aspect_ratio'
RESIZE_METHOD = 'resize_method'
PAD_TO_ASPECT_RATIO = 'pad_to_aspect_ratio'
BLACK_PATCHES = 'black_patches'
ADD_BLACK_PATCH = 'add_black_patch'
SELECTOR = 'selector'
SELECTOR_TUPLES = 'selector_tuples'
SELF_CONCAT_IMAGE = 'self_concat_image'
SSD_CROP_SELECTOR_ID = 'ssd_crop_selector_id'
SSD_CROP_PAD_SELECTOR_ID = 'ssd_crop_pad_selector_id'
JPEG_QUALITY = 'jpeg_quality'
DOWNSCALE_TO_TARGET_PIXELS = 'downscale_to_target_pixels'
PATCH_GAUSSIAN = 'patch_gaussian'
SQUARE_CROP_BY_SCALE = 'square_crop_scale'
  # 28 permitted function ids
_VALID_FNS = [ROTATION90, HORIZONTAL_FLIP, VERTICAL_FLIP, PIXEL_VALUE_SCALE,
IMAGE_SCALE, RGB_TO_GRAY, ADJUST_BRIGHTNESS, ADJUST_CONTRAST,
ADJUST_HUE, ADJUST_SATURATION, DISTORT_COLOR, STRICT_CROP_IMAGE,
CROP_IMAGE, PAD_IMAGE, CROP_TO_ASPECT_RATIO, RESIZE_METHOD,
PAD_TO_ASPECT_RATIO, BLACK_PATCHES, ADD_BLACK_PATCH, SELECTOR,
SELECTOR_TUPLES, SELF_CONCAT_IMAGE, SSD_CROP_SELECTOR_ID,
SSD_CROP_PAD_SELECTOR_ID, JPEG_QUALITY,
DOWNSCALE_TO_TARGET_PIXELS, PATCH_GAUSSIAN,
SQUARE_CROP_BY_SCALE]
def __init__(self):
self._history = collections.defaultdict(dict)
def clear(self):
"""Resets cache."""
self._history = collections.defaultdict(dict)
def get(self, function_id, key):
"""Gets stored value given a function id and key.
Args:
function_id: identifier for the preprocessing function used.
key: identifier for the variable stored.
Returns:
      value: the corresponding value, expected to be a tensor or nested
        structure of tensors, or None if nothing has been stored for this
        function_id and key.
Raises:
      ValueError: if function_id is not one of the 28 valid function ids.
"""
if function_id not in self._VALID_FNS:
raise ValueError('Function id not recognized: %s.' % str(function_id))
return self._history[function_id].get(key)
def update(self, function_id, key, value):
"""Adds a value to the dictionary.
Args:
function_id: identifier for the preprocessing function used.
key: identifier for the variable stored.
value: the value to store, expected to be a tensor or nested structure
of tensors.
Raises:
      ValueError: if function_id is not one of the 28 valid function ids.
"""
if function_id not in self._VALID_FNS:
raise ValueError('Function id not recognized: %s.' % str(function_id))
self._history[function_id][key] = value
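# Illustrative usage sketch (an addition, not part of the original class): it
# shows the get/update round trip that augmentation functions rely on. The key
# name 'flip_decision' is an arbitrary example, not a key used by any specific
# augmentation.
def _example_cache_usage():
  cache = PreprocessorCache()
  # First pass: nothing is stored yet, so the lookup returns None and the
  # caller would sample (and record) a fresh random variable.
  if cache.get(PreprocessorCache.HORIZONTAL_FLIP, 'flip_decision') is None:
    cache.update(PreprocessorCache.HORIZONTAL_FLIP, 'flip_decision', True)
  # Later passes replay the stored decision instead of re-sampling it.
  return cache.get(PreprocessorCache.HORIZONTAL_FLIP, 'flip_decision')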
| 4,409 | 39.090909 | 80 | py |
models | models-master/research/object_detection/core/box_coder.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import six
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for box coder."""
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      code_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if (shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1])
!= anchors.num_boxes_static()):
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(shape_utils.get_dim_as_int(encoded_boxes.get_shape()[1]),
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
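# Illustrative sketch (an addition, not part of the original module): a
# minimal concrete BoxCoder that encodes boxes as corner offsets from their
# anchors, included only to show how _encode/_decode plug into the public
# encode/decode and batch_decode helpers. The local box_list import is an
# assumption about the package layout used elsewhere in this codebase.
class _ExampleOffsetBoxCoder(BoxCoder):
  """Encodes each box as its corner-wise offset from the matching anchor."""
  @property
  def code_size(self):
    return 4
  def _encode(self, boxes, anchors):
    # Relative code is simply (box corners - anchor corners), shape [N, 4].
    return boxes.get() - anchors.get()
  def _decode(self, rel_codes, anchors):
    from object_detection.core import box_list  # local import for the sketch
    return box_list.BoxList(rel_codes + anchors.get())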
| 5,144 | 31.358491 | 80 | py |
models | models-master/research/object_detection/core/box_list.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
from object_detection.utils import shape_utils
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data: {}'.format(
boxes.shape))
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0])
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
    """Returns True if the specified field has been added to the box list."""
    return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
    This function returns the data stored under the specified field name; note
    that the box coordinates themselves are stored under the 'boxes' field.
    Args:
      field: a string specifying which field of the box collection to access.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
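# Illustrative usage sketch (an addition, not part of the original class):
# builds a small BoxList, attaches an extra field aligned with the boxes, and
# reads back derived quantities with the methods defined above.
def _example_box_list_usage():
  boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                       [0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
  boxlist = BoxList(boxes)
  # Extra fields must line up with the boxes along the 0th dimension.
  boxlist.add_field('scores', tf.constant([0.9, 0.6], dtype=tf.float32))
  ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()
  tensor_dict = boxlist.as_tensor_dict(['boxes', 'scores'])
  return tensor_dict, ycenter, xcenter, height, width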
| 6,859 | 31.511848 | 80 | py |
models | models-master/research/object_detection/matchers/argmax_matcher.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_threshold to prevent
columns from matching to rows (generally resulting in a negative training
example) and unmatched_threshold to ignore the match (generally resulting in
neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
from object_detection.utils import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False,
use_matmul_gather=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
        whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
        matched_threshold is high). Defaults to False. See the force-match
        tests in argmax_matcher_test.py for an example.
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather)
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller or equal '
'to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
'value. matched: {}, unmatched: {}'.format(
self._matched_threshold,
self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix, valid_rows):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
valid_rows: a boolean tensor of shape [N] indicating valid rows.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = (
tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1]) *
tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32))
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if shape_utils.get_dim_as_int(similarity_matrix.shape[0]) == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
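# Illustrative usage sketch (an addition, not part of the original class),
# mirroring the unit tests for this matcher: columns whose best similarity is
# below unmatched_threshold come back as -1 (negative) and columns between the
# two thresholds come back as -2 (ignored).
def _example_argmax_matcher_usage():
  similarity = tf.constant([[1., 1., 1., 3., 1.],
                            [2., -1., 2., 0., 4.],
                            [3., 0., -1., 0., 0.]], dtype=tf.float32)
  argmax_matcher_object = ArgMaxMatcher(matched_threshold=3.,
                                        unmatched_threshold=2.)
  match = argmax_matcher_object.match(similarity)
  # For the similarity above, match.match_results is [2, -1, -2, 0, 1]:
  # columns 0, 3 and 4 match rows 2, 0 and 1; column 1 is negative and
  # column 2 is ignored.
  return match.match_results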
| 9,524 | 44.574163 | 80 | py |
models | models-master/research/object_detection/matchers/bipartite_matcher.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bipartite matcher implementation."""
import tensorflow.compat.v1 as tf
from tensorflow.contrib.image.python.ops import image_ops
from object_detection.core import matcher
class GreedyBipartiteMatcher(matcher.Matcher):
"""Wraps a Tensorflow greedy bipartite matcher."""
def __init__(self, use_matmul_gather=False):
"""Constructs a Matcher.
Args:
use_matmul_gather: Force constructed match objects to use matrix
multiplication based gather instead of standard tf.gather.
(Default: False).
"""
super(GreedyBipartiteMatcher, self).__init__(
use_matmul_gather=use_matmul_gather)
  def _match(self, similarity_matrix, valid_rows):
    """Greedily bipartite matches a collection of rows and columns.
TODO(rathodv): Add num_valid_columns options to match only that many columns
with all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
invalid_row_sim_matrix = tf.gather(
similarity_matrix,
tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1))
similarity_matrix = tf.concat(
[valid_row_sim_matrix, invalid_row_sim_matrix], axis=0)
# Convert similarity matrix to distance matrix as tf.image.bipartite tries
# to find minimum distance matches.
distance_matrix = -1 * similarity_matrix
num_valid_rows = tf.reduce_sum(tf.cast(valid_rows, dtype=tf.float32))
_, match_results = image_ops.bipartite_match(
distance_matrix, num_valid_rows=num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
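# Illustrative usage sketch (an addition, not part of the original class). Note
# that this matcher relies on tf.contrib's bipartite_match op, so it is only
# usable with TF1-style builds that still ship tensorflow.contrib (see the
# TF1-only unit test for this module).
def _example_greedy_bipartite_usage():
  similarity = tf.constant([[0.50, 0.1, 0.8],
                            [0.15, 0.2, 0.3]], dtype=tf.float32)
  bipartite_matcher_object = GreedyBipartiteMatcher()
  match = bipartite_matcher_object.match(similarity)
  # Expected match_results: [-1, 1, 0] -- column 0 stays unmatched, column 1
  # matches row 1 and column 2 matches row 0.
  return match.match_results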
| 2,834 | 38.929577 | 80 | py |
models | models-master/research/object_detection/matchers/bipartite_matcher_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.bipartite_matcher."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class GreedyBipartiteMatcherTest(test_case.TestCase):
def test_get_expected_matches_when_all_rows_are_valid(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.ones([2], dtype=bool)
expected_match_results = [-1, 1, 0]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_all_rows_be_default(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
expected_match_results = [-1, 1, 0]
def graph_fn(similarity_matrix):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_no_matches_with_zero_valid_rows(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.zeros([2], dtype=bool)
expected_match_results = [-1, -1, -1]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.array([True, False], dtype=bool)
expected_match_results = [-1, -1, 0]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
def test_get_expected_matches_with_only_one_valid_row_at_bottom(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]],
dtype=np.float32)
valid_rows = np.array([False, True], dtype=bool)
expected_match_results = [-1, -1, 0]
def graph_fn(similarity_matrix, valid_rows):
matcher = bipartite_matcher.GreedyBipartiteMatcher()
match = matcher.match(similarity_matrix, valid_rows=valid_rows)
return match._match_results
match_results_out = self.execute(graph_fn, [similarity_matrix, valid_rows])
self.assertAllEqual(match_results_out, expected_match_results)
if __name__ == '__main__':
tf.test.main()
| 4,274 | 44.967742 | 96 | py |
models | models-master/research/object_detection/matchers/hungarian_matcher_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.bipartite_matcher."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import test_case
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.matchers import hungarian_matcher # pylint: disable=g-import-not-at-top
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class HungarianBipartiteMatcherTest(test_case.TestCase):
def test_get_expected_matches_when_all_rows_are_valid(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.ones([2], dtype=bool)
expected_match_results = [-1, 1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_all_rows_be_default(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
expected_match_results = [-1, 1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_no_matches_with_zero_valid_rows(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.zeros([2], dtype=bool)
expected_match_results = [-1, -1, -1]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_only_one_valid_row(self):
similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]],
dtype=np.float32)
valid_rows = np.array([True, False], dtype=bool)
expected_match_results = [-1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_only_one_valid_row_at_bottom(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]],
dtype=np.float32)
valid_rows = np.array([False, True], dtype=bool)
expected_match_results = [-1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
def test_get_expected_matches_with_two_valid_rows(self):
similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8],
[0.84, 0.32, 0.2]],
dtype=np.float32)
valid_rows = np.array([True, False, True], dtype=bool)
expected_match_results = [1, -1, 0]
matcher = hungarian_matcher.HungarianBipartiteMatcher()
match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
self.assertAllEqual(match_results_out._match_results.numpy(),
expected_match_results)
if __name__ == '__main__':
tf.test.main()
| 4,376 | 40.292453 | 96 | py |
models | models-master/research/object_detection/matchers/hungarian_matcher.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hungarian bipartite matcher implementation."""
import numpy as np
from scipy.optimize import linear_sum_assignment
import tensorflow.compat.v1 as tf
from object_detection.core import matcher
class HungarianBipartiteMatcher(matcher.Matcher):
"""Wraps a Hungarian bipartite matcher into TensorFlow."""
  def _match(self, similarity_matrix, valid_rows):
    """Optimally bipartite matches a collection of rows and columns.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
distance_matrix = -1 * valid_row_sim_matrix
def numpy_wrapper(inputs):
def numpy_matching(input_matrix):
row_indices, col_indices = linear_sum_assignment(input_matrix)
match_results = np.full(input_matrix.shape[1], -1)
match_results[col_indices] = row_indices
return match_results.astype(np.int32)
return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32])
matching_result = tf.autograph.experimental.do_not_convert(
numpy_wrapper)([distance_matrix])
return tf.reshape(matching_result, [-1])
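# Illustrative usage sketch (an addition, not part of the original class),
# mirroring the unit tests for this matcher: rows flagged invalid are excluded
# from the optimal assignment and unmatched columns come back as -1.
def _example_hungarian_matcher_usage():
  similarity = tf.constant([[0.15, 0.2, 0.3],
                            [0.50, 0.1, 0.8],
                            [0.84, 0.32, 0.2]], dtype=tf.float32)
  valid_rows = tf.constant([True, False, True])
  hungarian_matcher_object = HungarianBipartiteMatcher()
  match = hungarian_matcher_object.match(similarity, valid_rows=valid_rows)
  # Expected match_results: [1, -1, 0] -- indices refer to the valid rows, so
  # column 0 matches the second valid row and column 2 matches the first.
  return match.match_results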
| 2,258 | 37.288136 | 80 | py |
models | models-master/research/object_detection/matchers/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/matchers/argmax_matcher_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.matchers import argmax_matcher
from object_detection.utils import test_case
class ArgMaxMatcherTest(test_case.TestCase):
def test_return_correct_matches_with_default_thresholds(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1., 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_rows = np.array([2, 0, 1, 0, 1])
(res_matched_cols, res_unmatched_cols,
res_match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(res_match_results[res_matched_cols],
expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], [0, 1, 2, 3, 4])
self.assertFalse(np.all(res_unmatched_cols))
def test_return_correct_matches_with_empty_rows(self):
def graph_fn(similarity_matrix):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
match = matcher.match(similarity_matrix)
return match.unmatched_column_indicator()
similarity = 0.2 * np.ones([0, 5], dtype=np.float32)
res_unmatched_cols = self.execute(graph_fn, [similarity])
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0], np.arange(5))
def test_return_correct_matches_with_matched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1, 2])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(
matched_threshold=3.,
unmatched_threshold=2.,
negatives_lower_than_unmatched=False)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_not_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 3])
expected_matched_rows = np.array([2, 0])
expected_unmatched_cols = np.array([1, 2, 4])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_while_using_force_match(self):
def graph_fn(similarity):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.float32)
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([2, 1, 0])
    # Columns 2 and 4 have max similarity below the unmatched threshold.
    expected_unmatched_cols = np.array([2, 4])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_return_correct_matches_using_force_match_padded_groundtruth(self):
def graph_fn(similarity, valid_rows):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3.,
unmatched_threshold=2.,
force_match_for_each_row=True)
match = matcher.match(similarity, valid_rows)
matched_cols = match.matched_column_indicator()
unmatched_cols = match.unmatched_column_indicator()
match_results = match.match_results
return (matched_cols, unmatched_cols, match_results)
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[0, 0, 0, 0, 0],
[3, 0, -1, 2, 0],
[0, 0, 0, 0, 0]], dtype=np.float32)
valid_rows = np.array([True, True, False, True, False])
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([3, 1, 0])
    # Columns 2 and 4 have max similarity below the unmatched threshold.
    expected_unmatched_cols = np.array([2, 4])
(res_matched_cols, res_unmatched_cols,
match_results) = self.execute(graph_fn, [similarity, valid_rows])
self.assertAllEqual(match_results[res_matched_cols], expected_matched_rows)
self.assertAllEqual(np.nonzero(res_matched_cols)[0], expected_matched_cols)
self.assertAllEqual(np.nonzero(res_unmatched_cols)[0],
expected_unmatched_cols)
def test_valid_arguments_corner_case(self):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1)
def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1,
negatives_lower_than_unmatched=False)
def test_invalid_arguments_no_matched_threshold(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=None,
unmatched_threshold=4)
def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=2)
if __name__ == '__main__':
tf.test.main()
| 10,744 | 44.529661 | 80 | py |
models | models-master/research/object_detection/anchor_generators/flexible_grid_anchor_generator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers."""
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class FlexibleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers of different scale."""
def __init__(self, base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=True):
"""Constructs a FlexibleGridAnchorGenerator.
This generator is more flexible than the multiple_grid_anchor_generator
and multiscale_grid_anchor_generator, and can generate any of the anchors
that they can generate, plus additional anchor configurations. In
particular, it allows the explicit specification of scale and aspect ratios
    at each layer without making any assumptions about the relationship
    between scales and aspect ratios across layers.
Args:
base_sizes: list of tuples of anchor base sizes. For example, setting
base_sizes=[(1, 2, 3), (4, 5)] means that we want 3 anchors at each
grid point on the first layer with the base sizes of 1, 2, and 3, and 2
anchors at each grid point on the second layer with the base sizes of
4 and 5.
aspect_ratios: list or tuple of aspect ratios. For example, setting
aspect_ratios=[(1.0, 2.0, 0.5), (1.0, 2.0)] means that we want 3 anchors
at each grid point on the first layer with aspect ratios of 1.0, 2.0,
        and 0.5, and 2 anchors at each grid point on the second layer with
        aspect ratios of 1.0 and 2.0.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
anchor_offsets=[(10, 10), (20, 20)]) means that we want the
(0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
and likewise that we want the (0, 0)-th anchor of the second layer to
        lie at (20, 20) in pixel space.
normalize_coordinates: whether to produce anchors in normalized
coordinates. (defaults to True).
"""
self._base_sizes = base_sizes
self._aspect_ratios = aspect_ratios
self._anchor_strides = anchor_strides
self._anchor_offsets = anchor_offsets
self._normalize_coordinates = normalize_coordinates
def name_scope(self):
return 'FlexibleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return [len(size) for size in self._base_sizes]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
Currently we require the input image shape to be statically defined. That
is, im_height and im_width should be integers rather than tensors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if im_height and im_width are 1, but normalized coordinates
were requested.
"""
anchor_grid_list = []
for (feat_shape, base_sizes, aspect_ratios, anchor_stride, anchor_offset
) in zip(feature_map_shape_list, self._base_sizes, self._aspect_ratios,
self._anchor_strides, self._anchor_offsets):
anchor_grid = grid_anchor_generator.tile_anchors(
feat_shape[0],
feat_shape[1],
tf.cast(tf.convert_to_tensor(base_sizes), dtype=tf.float32),
tf.cast(tf.convert_to_tensor(aspect_ratios), dtype=tf.float32),
tf.constant([1.0, 1.0]),
tf.cast(tf.convert_to_tensor(anchor_stride), dtype=tf.float32),
tf.cast(tf.convert_to_tensor(anchor_offset), dtype=tf.float32))
num_anchors = anchor_grid.num_boxes_static()
if num_anchors is None:
num_anchors = anchor_grid.num_boxes()
anchor_indices = tf.zeros([num_anchors])
anchor_grid.add_field('feature_map_index', anchor_indices)
if self._normalize_coordinates:
if im_height == 1 or im_width == 1:
raise ValueError(
'Normalized coordinates were requested upon construction of the '
'FlexibleGridAnchorGenerator, but a subsequent call to '
'generate did not supply dimension information.')
anchor_grid = box_list_ops.to_normalized_coordinates(
anchor_grid, im_height, im_width, check_range=False)
anchor_grid_list.append(anchor_grid)
return anchor_grid_list
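# Illustrative usage sketch (an addition, not part of the original class):
# configures two feature maps with different numbers of anchors per location
# and generates anchors in absolute image coordinates. The image size and grid
# shapes are arbitrary example values; base_sizes[i] and aspect_ratios[i] must
# have the same length since each (size, ratio) pair defines one anchor.
def _example_flexible_grid_usage():
  generator = FlexibleGridAnchorGenerator(
      base_sizes=[(32.0, 64.0), (128.0,)],
      aspect_ratios=[(1.0, 2.0), (1.0,)],
      anchor_strides=[(16, 16), (32, 32)],
      anchor_offsets=[(8, 8), (16, 16)],
      normalize_coordinates=False)
  # num_anchors_per_location() -> [2, 1], one entry per feature map.
  anchors_list = generator.generate(
      feature_map_shape_list=[(4, 4), (2, 2)], im_height=64, im_width=64)
  # anchors_list[0] holds 4 * 4 * 2 = 32 boxes; anchors_list[1] holds
  # 2 * 2 * 1 = 4 boxes, each as a BoxList in pixel coordinates.
  return [anchors.get() for anchors in anchors_list]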
| 6,584 | 47.777778 | 80 | py |
models | models-master/research/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiple_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import multiple_grid_anchor_generator as ag
from object_detection.utils import test_case
class MultipleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor_grid(self):
"""Builds a 1x1 anchor grid to test the size of the output boxes."""
def graph_fn():
box_specs_list = [[(.5, .25), (1.0, .25), (2.0, .25),
(.5, 1.0), (1.0, 1.0), (2.0, 1.0),
(.5, 4.0), (1.0, 4.0), (2.0, 4.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([256, 256], dtype=tf.float32),
anchor_strides=[(16, 16)],
anchor_offsets=[(7, -3)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
return anchors_list[0].get()
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
[-505, -131, 519, 125], [-57, -67, 71, 61],
[-121, -131, 135, 125], [-249, -259, 263, 253],
[-25, -131, 39, 125], [-57, -259, 71, 253],
[-121, -515, 135, 509]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid(self):
def graph_fn():
box_specs_list = [[(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([10, 10], dtype=tf.float32),
anchor_strides=[(19, 19)],
anchor_offsets=[(0, 0)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
return anchors_list[0].get()
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_non_square(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(feature_map_shape_list=[(
tf.constant(1, dtype=tf.int32), tf.constant(2, dtype=tf.int32))])
return anchors_list[0].get()
exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_dynamic_size_anchor_grid(self):
def graph_fn(height, width):
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(feature_map_shape_list=[(height,
width)])
return anchors_list[0].get()
exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]]
anchor_corners_out = self.execute_cpu(graph_fn,
[np.array(1, dtype=np.int32),
np.array(2, dtype=np.int32)])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_normalized(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list, base_anchor_size=tf.constant([1, 1],
dtype=tf.float32))
anchors_list = anchor_generator.generate(
feature_map_shape_list=[(tf.constant(1, dtype=tf.int32), tf.constant(
2, dtype=tf.int32))],
im_height=320,
im_width=640)
return anchors_list[0].get()
exp_anchor_corners = [[0., 0., 1., 0.5], [0., 0.5, 1., 1.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_multiple_grids(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), (
2, 2)])
return [anchors.get() for anchors in anchors_list]
# height and width of box with .5 aspect ratio
h = np.sqrt(2)
w = 1.0/np.sqrt(2)
exp_small_grid_corners = [[-.25, -.25, .75, .75],
[.25-.5*h, .25-.5*w, .25+.5*h, .25+.5*w],
[-.25, .25, .75, 1.25],
[.25-.5*h, .75-.5*w, .25+.5*h, .75+.5*w],
[.25, -.25, 1.25, .75],
[.75-.5*h, .25-.5*w, .75+.5*h, .25+.5*w],
[.25, .25, 1.25, 1.25],
[.75-.5*h, .75-.5*w, .75+.5*h, .75+.5*w]]
# only test first entry of larger set of anchors
exp_big_grid_corners = [[.125-.5, .125-.5, .125+.5, .125+.5],
[.125-1.0, .125-1.0, .125+1.0, .125+1.0],
[.125-.5*h, .125-.5*w, .125+.5*h, .125+.5*w],]
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
self.assertEqual(anchor_corners_out.shape, (56, 4))
big_grid_corners = anchor_corners_out[0:3, :]
small_grid_corners = anchor_corners_out[48:, :]
self.assertAllClose(small_grid_corners, exp_small_grid_corners)
self.assertAllClose(big_grid_corners, exp_big_grid_corners)
def test_construct_multiple_grids_with_clipping(self):
def graph_fn():
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
clip_window = tf.constant([0, 0, 1, 1], dtype=tf.float32)
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
clip_window=clip_window)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(4, 4), (
2, 2)])
return [anchors.get() for anchors in anchors_list]
# height and width of box with .5 aspect ratio
h = np.sqrt(2)
w = 1.0/np.sqrt(2)
exp_small_grid_corners = [[0, 0, .75, .75],
[0, 0, .25+.5*h, .25+.5*w],
[0, .25, .75, 1],
[0, .75-.5*w, .25+.5*h, 1],
[.25, 0, 1, .75],
[.75-.5*h, 0, 1, .25+.5*w],
[.25, .25, 1, 1],
[.75-.5*h, .75-.5*w, 1, 1]]
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
small_grid_corners = anchor_corners_out[48:, :]
self.assertAllClose(small_grid_corners, exp_small_grid_corners)
def test_invalid_box_specs(self):
# not all box specs are pairs
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5, .3)]]
with self.assertRaises(ValueError):
ag.MultipleGridAnchorGenerator(box_specs_list)
# box_specs_list is not a list of lists
box_specs_list = [(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)]
with self.assertRaises(ValueError):
ag.MultipleGridAnchorGenerator(box_specs_list)
def test_invalid_generate_arguments(self):
box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],
[(1.0, 1.0), (1.0, 0.5)]]
# incompatible lengths with box_specs_list
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2), (1, 1)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.5, .5)],
anchor_offsets=[(.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
# not pairs
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25, .1), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])
with self.assertRaises(ValueError):
anchor_generator = ag.MultipleGridAnchorGenerator(
box_specs_list,
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),
anchor_strides=[(.25, .25), (.5, .5)],
anchor_offsets=[(.125, .125), (.25, .25)])
anchor_generator.generate(feature_map_shape_list=[(4), (2, 2)])
class CreateSSDAnchorsTest(test_case.TestCase):
def test_create_ssd_anchors_returns_correct_shape(self):
def graph_fn1():
anchor_generator = ag.create_ssd_anchors(
num_layers=6,
min_scale=0.2,
max_scale=0.95,
aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
reduce_boxes_in_lowest_layer=True)
feature_map_shape_list = [(38, 38), (19, 19), (10, 10),
(5, 5), (3, 3), (1, 1)]
anchors_list = anchor_generator.generate(
feature_map_shape_list=feature_map_shape_list)
return [anchors.get() for anchors in anchors_list]
anchor_corners_out = np.concatenate(self.execute(graph_fn1, []), axis=0)
self.assertEqual(anchor_corners_out.shape, (7308, 4))
def graph_fn2():
anchor_generator = ag.create_ssd_anchors(
num_layers=6, min_scale=0.2, max_scale=0.95,
aspect_ratios=(1.0, 2.0, 3.0, 1.0/2, 1.0/3),
reduce_boxes_in_lowest_layer=False)
feature_map_shape_list = [(38, 38), (19, 19), (10, 10),
(5, 5), (3, 3), (1, 1)]
anchors_list = anchor_generator.generate(
feature_map_shape_list=feature_map_shape_list)
return [anchors.get() for anchors in anchors_list]
anchor_corners_out = np.concatenate(self.execute(graph_fn2, []), axis=0)
self.assertEqual(anchor_corners_out.shape, (11640, 4))
if __name__ == '__main__':
tf.test.main()
| 12,851 | 43.317241 | 83 | py |
models | models-master/research/object_detection/anchor_generators/multiscale_grid_anchor_generator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers.
Generates grid anchors on the fly corresponding to multiple CNN layers as
described in:
"Focal Loss for Dense Object Detection" (https://arxiv.org/abs/1708.02002)
T.-Y. Lin, P. Goyal, R. Girshick, K. He, P. Dollar
"""
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class MultiscaleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers of different scale."""
def __init__(self, min_level, max_level, anchor_scale, aspect_ratios,
scales_per_octave, normalize_coordinates=True):
"""Constructs a MultiscaleGridAnchorGenerator.
    To construct anchors at multiple scale resolutions, one must provide the
    minimum and maximum levels of a scale pyramid. The anchor scale determines
    the size of each anchor relative to the stride of the corresponding
    feature map. The generator maps each pixel location on a feature map to
    multiple anchors with different aspect ratios and intermediate scales.
Args:
min_level: minimum level in feature pyramid.
max_level: maximum level in feature pyramid.
anchor_scale: anchor scale and feature stride define the size of the base
        anchor on an image. For example, given a feature pyramid with strides
        [2^3, ..., 2^7] and anchor scale 4, the base anchor sizes are
        4 * [2^3, ..., 2^7].
aspect_ratios: list or tuple of (float) aspect ratios to place on each
grid point.
scales_per_octave: integer number of intermediate scales per scale octave.
normalize_coordinates: whether to produce anchors in normalized
coordinates. (defaults to True).
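    Example (an illustrative sketch; the argument values are arbitrary and
    only demonstrate the call pattern):
      anchor_generator = MultiscaleGridAnchorGenerator(
          min_level=3, max_level=7, anchor_scale=4.0,
          aspect_ratios=[1.0, 2.0, 0.5], scales_per_octave=2)
      # 3 aspect ratios * 2 scales per octave -> 6 anchors per location.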
"""
self._anchor_grid_info = []
self._aspect_ratios = aspect_ratios
self._scales_per_octave = scales_per_octave
self._normalize_coordinates = normalize_coordinates
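    # Intermediate scales within one octave; e.g. scales_per_octave=2 yields
    # [2**0.0, 2**0.5].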
scales = [2**(float(scale) / scales_per_octave)
for scale in range(scales_per_octave)]
aspects = list(aspect_ratios)
for level in range(min_level, max_level + 1):
anchor_stride = [2**level, 2**level]
base_anchor_size = [2**level * anchor_scale, 2**level * anchor_scale]
self._anchor_grid_info.append({
'level': level,
'info': [scales, aspects, base_anchor_size, anchor_stride]
})
def name_scope(self):
return 'MultiscaleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return len(self._anchor_grid_info) * [
len(self._aspect_ratios) * self._scales_per_octave]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
For training, we require the input image shape to be statically defined.
That is, im_height and im_width should be integers rather than tensors.
For inference, im_height and im_width can be either integers (for fixed
image size), or tensors (for arbitrary image size).
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, anchors can only be generated in
absolute coordinates.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if im_height and im_width are not integers.
ValueError: if im_height and im_width are 1, but normalized coordinates
were requested.
"""
anchor_grid_list = []
for feat_shape, grid_info in zip(feature_map_shape_list,
self._anchor_grid_info):
# TODO(rathodv) check the feature_map_shape_list is consistent with
# self._anchor_grid_info
level = grid_info['level']
stride = 2**level
scales, aspect_ratios, base_anchor_size, anchor_stride = grid_info['info']
feat_h = feat_shape[0]
feat_w = feat_shape[1]
anchor_offset = [0, 0]
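      # Use an offset of stride / 2 (anchors centered on feature map cells)
      # when the static image dimension is an exact multiple of the stride or
      # equals 1; for dynamic (tensor) image shapes the offset is always
      # stride / 2.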
if isinstance(im_height, int) and isinstance(im_width, int):
if im_height % 2.0**level == 0 or im_height == 1:
anchor_offset[0] = stride / 2.0
if im_width % 2.0**level == 0 or im_width == 1:
anchor_offset[1] = stride / 2.0
if tf.is_tensor(im_height) and tf.is_tensor(im_width):
anchor_offset[0] = stride / 2.0
anchor_offset[1] = stride / 2.0
ag = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
(anchor_grid,) = ag.generate(feature_map_shape_list=[(feat_h, feat_w)])
if self._normalize_coordinates:
if im_height == 1 or im_width == 1:
raise ValueError(
'Normalized coordinates were requested upon construction of the '
'MultiscaleGridAnchorGenerator, but a subsequent call to '
'generate did not supply dimension information.')
anchor_grid = box_list_ops.to_normalized_coordinates(
anchor_grid, im_height, im_width, check_range=False)
anchor_grid_list.append(anchor_grid)
return anchor_grid_list
| 6,757 | 43.169935 | 80 | py |
models | models-master/research/object_detection/anchor_generators/multiple_grid_anchor_generator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly corresponding to multiple CNN layers.
Generates grid anchors on the fly corresponding to multiple CNN layers as
described in:
"SSD: Single Shot MultiBox Detector"
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
(see Section 2.2: Choosing scales and aspect ratios for default boxes)
"""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.core import anchor_generator
from object_detection.core import box_list_ops
class MultipleGridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generate a grid of anchors for multiple CNN layers."""
def __init__(self,
box_specs_list,
base_anchor_size=None,
anchor_strides=None,
anchor_offsets=None,
clip_window=None):
"""Constructs a MultipleGridAnchorGenerator.
    To construct anchors at multiple grid resolutions, one must provide a
    feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid
size, a corresponding list of (scale, aspect ratio) box specifications.
For example:
box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid
[(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid
To support the fully convolutional setting, we pass grid sizes in at
generation time, while scale and aspect ratios are fixed at construction
time.
Args:
box_specs_list: list of list of (scale, aspect ratio) pairs with the
outside list having the same number of entries as feature_map_shape_list
(which is passed in at generation time).
base_anchor_size: base anchor size as [height, width]
                        (length-2 float numpy or Tensor, default=[256, 256]).
The height and width values are normalized to the
minimum dimension of the input height and width, so that
when the base anchor height equals the base anchor
width, the resulting anchor is square even if the input
image is not square.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions. If anchor_strides=None, they are set
to be the reciprocal of the corresponding feature map shapes.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
anchor_offsets=[(10, 10), (20, 20)]) means that we want the
(0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
and likewise that we want the (0, 0)-th anchor of the second layer to
        lie at (20, 20) in pixel space. If anchor_offsets=None, then they are
set to be half of the corresponding anchor stride.
clip_window: a tensor of shape [4] specifying a window to which all
anchors should be clipped. If clip_window is None, then no clipping
is performed.
Raises:
ValueError: if box_specs_list is not a list of list of pairs
ValueError: if clip_window is not either None or a tensor of shape [4]
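    Example (an illustrative sketch; the box specs and base anchor size are
    arbitrary values that only demonstrate the call pattern):
      anchor_generator = MultipleGridAnchorGenerator(
          box_specs_list=[[(.1, 1.0), (.1, 2.0)],
                          [(.2, 1.0), (.3, 1.0), (.2, 2.0)]],
          base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32))
      anchors_list = anchor_generator.generate(
          feature_map_shape_list=[(8, 8), (4, 4)])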
"""
if isinstance(box_specs_list, list) and all(
[isinstance(list_item, list) for list_item in box_specs_list]):
self._box_specs = box_specs_list
else:
raise ValueError('box_specs_list is expected to be a '
'list of lists of pairs')
if base_anchor_size is None:
base_anchor_size = [256, 256]
self._base_anchor_size = base_anchor_size
self._anchor_strides = anchor_strides
self._anchor_offsets = anchor_offsets
if clip_window is not None and clip_window.get_shape().as_list() != [4]:
raise ValueError('clip_window must either be None or a shape [4] tensor')
self._clip_window = clip_window
self._scales = []
self._aspect_ratios = []
for box_spec in self._box_specs:
if not all([isinstance(entry, tuple) and len(entry) == 2
for entry in box_spec]):
raise ValueError('box_specs_list is expected to be a '
'list of lists of pairs')
scales, aspect_ratios = zip(*box_spec)
self._scales.append(scales)
self._aspect_ratios.append(aspect_ratios)
for arg, arg_name in zip([self._anchor_strides, self._anchor_offsets],
['anchor_strides', 'anchor_offsets']):
if arg and not (isinstance(arg, list) and
len(arg) == len(self._box_specs)):
raise ValueError('%s must be a list with the same length '
'as self._box_specs' % arg_name)
if arg and not all([
isinstance(list_item, tuple) and len(list_item) == 2
for list_item in arg
]):
raise ValueError('%s must be a list of pairs.' % arg_name)
def name_scope(self):
return 'MultipleGridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the Generate function.
"""
return [len(box_specs) for box_specs in self._box_specs]
def _generate(self, feature_map_shape_list, im_height=1, im_width=1):
"""Generates a collection of bounding boxes to be used as anchors.
The number of anchors generated for a single grid with shape MxM where we
place k boxes over each grid center is k*M^2 and thus the total number of
anchors is the sum over all grids. In our box_specs_list example
(see the constructor docstring), we would place two boxes over each grid
point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and
thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the
output anchors follows the order of how the grid sizes and box_specs are
specified (with box_spec index varying the fastest, followed by width
index, then height index, then grid index).
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0), (height_1, width_1), ...]. For example,
setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that
correspond to an 8x8 layer followed by a 7x7 layer.
im_height: the height of the image to generate the grid for. If both
im_height and im_width are 1, the generated anchors default to
absolute coordinates, otherwise normalized coordinates are produced.
im_width: the width of the image to generate the grid for. If both
im_height and im_width are 1, the generated anchors default to
absolute coordinates, otherwise normalized coordinates are produced.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if feature_map_shape_list, box_specs_list do not have the same
length.
ValueError: if feature_map_shape_list does not consist of pairs of
integers
"""
if not (isinstance(feature_map_shape_list, list)
and len(feature_map_shape_list) == len(self._box_specs)):
raise ValueError('feature_map_shape_list must be a list with the same '
'length as self._box_specs')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in feature_map_shape_list]):
raise ValueError('feature_map_shape_list must be a list of pairs.')
im_height = tf.cast(im_height, dtype=tf.float32)
im_width = tf.cast(im_width, dtype=tf.float32)
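    # Convert anchor strides and offsets to normalized image coordinates: when
    # not supplied they default to one feature-map cell (1 / feature map
    # shape); when supplied in pixels they are divided by the image size.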
if not self._anchor_strides:
anchor_strides = [(1.0 / tf.cast(pair[0], dtype=tf.float32),
1.0 / tf.cast(pair[1], dtype=tf.float32))
for pair in feature_map_shape_list]
else:
anchor_strides = [(tf.cast(stride[0], dtype=tf.float32) / im_height,
tf.cast(stride[1], dtype=tf.float32) / im_width)
for stride in self._anchor_strides]
if not self._anchor_offsets:
anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])
for stride in anchor_strides]
else:
anchor_offsets = [(tf.cast(offset[0], dtype=tf.float32) / im_height,
tf.cast(offset[1], dtype=tf.float32) / im_width)
for offset in self._anchor_offsets]
for arg, arg_name in zip([anchor_strides, anchor_offsets],
['anchor_strides', 'anchor_offsets']):
if not (isinstance(arg, list) and len(arg) == len(self._box_specs)):
raise ValueError('%s must be a list with the same length '
'as self._box_specs' % arg_name)
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in arg]):
raise ValueError('%s must be a list of pairs.' % arg_name)
anchor_grid_list = []
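    # Normalize the base anchor size by the smaller image dimension so that a
    # square base anchor remains square in pixel space even when the input
    # image is not square (see the base_anchor_size docstring above).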
min_im_shape = tf.minimum(im_height, im_width)
scale_height = min_im_shape / im_height
scale_width = min_im_shape / im_width
if not tf.is_tensor(self._base_anchor_size):
base_anchor_size = [
scale_height * tf.constant(self._base_anchor_size[0],
dtype=tf.float32),
scale_width * tf.constant(self._base_anchor_size[1],
dtype=tf.float32)
]
else:
base_anchor_size = [
scale_height * self._base_anchor_size[0],
scale_width * self._base_anchor_size[1]
]
for feature_map_index, (grid_size, scales, aspect_ratios, stride,
offset) in enumerate(
zip(feature_map_shape_list, self._scales,
self._aspect_ratios, anchor_strides,
anchor_offsets)):
tiled_anchors = grid_anchor_generator.tile_anchors(
grid_height=grid_size[0],
grid_width=grid_size[1],
scales=scales,
aspect_ratios=aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=stride,
anchor_offset=offset)
if self._clip_window is not None:
tiled_anchors = box_list_ops.clip_to_window(
tiled_anchors, self._clip_window, filter_nonoverlapping=False)
num_anchors_in_layer = tiled_anchors.num_boxes_static()
if num_anchors_in_layer is None:
num_anchors_in_layer = tiled_anchors.num_boxes()
anchor_indices = feature_map_index * tf.ones([num_anchors_in_layer])
tiled_anchors.add_field('feature_map_index', anchor_indices)
anchor_grid_list.append(tiled_anchors)
return anchor_grid_list
def create_ssd_anchors(num_layers=6,
min_scale=0.2,
max_scale=0.95,
scales=None,
aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
interpolated_scale_aspect_ratio=1.0,
base_anchor_size=None,
anchor_strides=None,
anchor_offsets=None,
reduce_boxes_in_lowest_layer=True):
"""Creates MultipleGridAnchorGenerator for SSD anchors.
This function instantiates a MultipleGridAnchorGenerator that reproduces
``default box`` construction proposed by Liu et al in the SSD paper.
See Section 2.2 for details. Grid sizes are assumed to be passed in
at generation time from finest resolution to coarsest resolution --- this is
used to (linearly) interpolate scales of anchor boxes corresponding to the
intermediate grid sizes.
Anchors that are returned by calling the `generate` method on the returned
MultipleGridAnchorGenerator object are always in normalized coordinates
  and clipped to the unit square (i.e. all coordinates lie in [0, 1]x[0, 1]).
Args:
num_layers: integer number of grid layers to create anchors for (actual
grid sizes passed in at generation time)
min_scale: scale of anchors corresponding to finest resolution (float)
max_scale: scale of anchors corresponding to coarsest resolution (float)
    scales: A list of anchor scales to use. When not None and not empty,
min_scale and max_scale are not used.
aspect_ratios: list or tuple of (float) aspect ratios to place on each
grid point.
interpolated_scale_aspect_ratio: An additional anchor is added with this
aspect ratio and a scale interpolated between the scale for a layer
and the scale for the next layer (1.0 for the last layer).
This anchor is not included if this value is 0.
base_anchor_size: base anchor size as [height, width].
The height and width values are normalized to the minimum dimension of the
input height and width, so that when the base anchor height equals the
base anchor width, the resulting anchor is square even if the input image
is not square.
anchor_strides: list of pairs of strides in pixels (in y and x directions
respectively). For example, setting anchor_strides=[(25, 25), (50, 50)]
means that we want the anchors corresponding to the first layer to be
strided by 25 pixels and those in the second layer to be strided by 50
pixels in both y and x directions. If anchor_strides=None, they are set to
be the reciprocal of the corresponding feature map shapes.
anchor_offsets: list of pairs of offsets in pixels (in y and x directions
respectively). The offset specifies where we want the center of the
(0, 0)-th anchor to lie for each layer. For example, setting
anchor_offsets=[(10, 10), (20, 20)]) means that we want the
(0, 0)-th anchor of the first layer to lie at (10, 10) in pixel space
and likewise that we want the (0, 0)-th anchor of the second layer to lie
      at (20, 20) in pixel space. If anchor_offsets=None, then they are set to
be half of the corresponding anchor stride.
reduce_boxes_in_lowest_layer: a boolean to indicate whether the fixed 3
boxes per location is used in the lowest layer.
Returns:
a MultipleGridAnchorGenerator
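  Example (an illustrative sketch; the feature map shapes below are typical of
  an SSD-style model with a 300x300 input, but are assumptions here):
    anchor_generator = create_ssd_anchors(
        num_layers=6, min_scale=0.2, max_scale=0.95,
        aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3))
    anchors_list = anchor_generator.generate(
        feature_map_shape_list=[(38, 38), (19, 19), (10, 10),
                                (5, 5), (3, 3), (1, 1)])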
"""
if base_anchor_size is None:
base_anchor_size = [1.0, 1.0]
box_specs_list = []
if scales is None or not scales:
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
for i in range(num_layers)] + [1.0]
else:
    # Add 1.0 to the end, which is only used as scale_next below to compute an
    # interpolated scale for the largest scale in the list.
scales += [1.0]
for layer, scale, scale_next in zip(
range(num_layers), scales[:-1], scales[1:]):
layer_box_specs = []
if layer == 0 and reduce_boxes_in_lowest_layer:
layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)]
else:
for aspect_ratio in aspect_ratios:
layer_box_specs.append((scale, aspect_ratio))
# Add one more anchor, with a scale between the current scale, and the
# scale for the next layer, with a specified aspect ratio (1.0 by
# default).
if interpolated_scale_aspect_ratio > 0.0:
layer_box_specs.append((np.sqrt(scale*scale_next),
interpolated_scale_aspect_ratio))
box_specs_list.append(layer_box_specs)
return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size,
anchor_strides, anchor_offsets)
| 16,669 | 47.600583 | 80 | py |
models | models-master/research/object_detection/anchor_generators/flexible_grid_anchor_generator_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.flexible_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import flexible_grid_anchor_generator as fg
from object_detection.utils import test_case
class FlexibleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_unit_dimensions(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(32.0,)]
aspect_ratios = [(1.0,)]
im_height = 1
im_width = 1
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Positive offsets are produced.
exp_anchor_corners = [[0, 0, 32, 32],
[0, 32, 32, 64],
[32, 0, 64, 32],
[32, 32, 64, 64]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_normalized_anchors_fails_with_unit_dimensions(self):
anchor_generator = fg.FlexibleGridAnchorGenerator(
[(32.0,)], [(1.0,)], [(32, 32),], [(16, 16),],
normalize_coordinates=True)
with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'):
anchor_generator.generate(
feature_map_shape_list=[(2, 2)], im_height=1, im_width=1)
def test_construct_single_anchor_in_normalized_coordinates(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(16, 16),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 64
im_width = 128
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=True)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
[-48./64, -16./128, 80./64, 112./128],
[-16./64, -48./128, 112./64, 80./128],
[-16./64, -16./128, 112./64, 112./128]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_num_anchors_per_location(self):
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(32.0, 64.0, 96.0, 32.0, 64.0, 96.0),
(64.0, 128.0, 172.0, 64.0, 128.0, 172.0)]
aspect_ratios = [(1.0, 1.0, 1.0, 2.0, 2.0, 2.0),
(1.0, 1.0, 1.0, 2.0, 2.0, 2.0)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])
def test_construct_single_anchor_dynamic_size(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(0, 0),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = tf.constant(64)
im_width = tf.constant(64)
feature_map_shape_list = [(2, 2)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Zero offsets are used.
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-32, -64, 96, 64],
[-32, -32, 96, 96]]
anchor_corners_out = self.execute_cpu(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_odd_input_dimension(self):
def graph_fn():
anchor_strides = [(32, 32),]
anchor_offsets = [(0, 0),]
base_sizes = [(128.0,)]
aspect_ratios = [(1.0,)]
im_height = 65
im_width = 65
feature_map_shape_list = [(3, 3)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-64, 0, 64, 128],
[-32, -64, 96, 64],
[-32, -32, 96, 96],
[-32, 0, 96, 128],
[0, -64, 128, 64],
[0, -32, 128, 96],
[0, 0, 128, 128]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_on_two_feature_maps(self):
def graph_fn():
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(128.0,), (256.0,)]
aspect_ratios = [(1.0,), (1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2), (1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave(self):
def graph_fn():
anchor_strides = [(64, 64),]
anchor_offsets = [(32, 32),]
base_sizes = [(256.0, 362.03867)]
aspect_ratios = [(1.0, 1.0)]
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 2 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
def graph_fn():
anchor_strides = [(64, 64),]
anchor_offsets = [(32, 32),]
base_sizes = [(256.0, 362.03867, 256.0, 362.03867)]
aspect_ratios = [(1.0, 1.0, 2.0, 2.0)]
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 4 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect],
# [2**0.0 intermediate scale + 2.0 aspect],
# [2**0.5 intermediate scale + 2.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193],
[-58.50967, -149.0193, 122.50967, 213.0193],
[-96., -224., 160., 288.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
feature_map2_width):
anchor_strides = [(32, 32), (64, 64)]
anchor_offsets = [(16, 16), (32, 32)]
base_sizes = [(128.0,), (256.0,)]
aspect_ratios = [(1.0,), (1.0,)]
im_height = 64
im_width = 64
feature_map_shape_list = [(feature_map1_height, feature_map1_width),
(feature_map2_height, feature_map2_width)]
anchor_generator = fg.FlexibleGridAnchorGenerator(
base_sizes, aspect_ratios, anchor_strides, anchor_offsets,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(
self.execute_cpu(graph_fn, [
np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32),
np.array(1, dtype=np.int32),
np.array(1, dtype=np.int32)
]),
axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 12,582 | 41.945392 | 83 | py |
models | models-master/research/object_detection/anchor_generators/grid_anchor_generator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates grid anchors on the fly as used in Faster RCNN.
Generates grid anchors on the fly as described in:
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks"
Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
"""
import tensorflow.compat.v1 as tf
from object_detection.core import anchor_generator
from object_detection.core import box_list
from object_detection.utils import ops
class GridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generates a grid of anchors at given scales and aspect ratios."""
def __init__(self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=None,
anchor_stride=None,
anchor_offset=None):
"""Constructs a GridAnchorGenerator.
Args:
scales: a list of (float) scales, default=(0.5, 1.0, 2.0)
aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0)
      base_anchor_size: base anchor size as height, width
                        (length-2 float32 list or tensor, default=[256, 256])
anchor_stride: difference in centers between base anchors for adjacent
grid positions (length-2 float32 list or tensor,
default=[16, 16])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need additional calculation if other
padding is used (length-2 float32 list or tensor,
default=[0, 0])
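    Example (an illustrative sketch; all values are arbitrary and only show
    the call pattern):
      anchor_generator = GridAnchorGenerator(
          scales=[0.5, 1.0, 2.0], aspect_ratios=[1.0],
          base_anchor_size=[10, 10], anchor_stride=[19, 19],
          anchor_offset=[0, 0])
      anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])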
"""
# Handle argument defaults
if base_anchor_size is None:
base_anchor_size = [256, 256]
if anchor_stride is None:
anchor_stride = [16, 16]
if anchor_offset is None:
anchor_offset = [0, 0]
self._scales = scales
self._aspect_ratios = aspect_ratios
self._base_anchor_size = base_anchor_size
self._anchor_stride = anchor_stride
self._anchor_offset = anchor_offset
def name_scope(self):
return 'GridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
return [len(self._scales) * len(self._aspect_ratios)]
def _generate(self, feature_map_shape_list):
"""Generates a collection of bounding boxes to be used as anchors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0)]. For example, setting
feature_map_shape_list=[(8, 8)] asks for anchors that correspond
to an 8x8 layer. For this anchor generator, only lists of length 1 are
allowed.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if feature_map_shape_list, box_specs_list do not have the same
length.
ValueError: if feature_map_shape_list does not consist of pairs of
integers
"""
if not (isinstance(feature_map_shape_list, list)
and len(feature_map_shape_list) == 1):
raise ValueError('feature_map_shape_list must be a list of length 1.')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in feature_map_shape_list]):
raise ValueError('feature_map_shape_list must be a list of pairs.')
# Create constants in init_scope so they can be created in tf.functions
# and accessed from outside of the function.
with tf.init_scope():
self._base_anchor_size = tf.cast(tf.convert_to_tensor(
self._base_anchor_size), dtype=tf.float32)
self._anchor_stride = tf.cast(tf.convert_to_tensor(
self._anchor_stride), dtype=tf.float32)
self._anchor_offset = tf.cast(tf.convert_to_tensor(
self._anchor_offset), dtype=tf.float32)
grid_height, grid_width = feature_map_shape_list[0]
scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
self._aspect_ratios)
scales_grid = tf.reshape(scales_grid, [-1])
aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
anchors = tile_anchors(grid_height,
grid_width,
scales_grid,
aspect_ratios_grid,
self._base_anchor_size,
self._anchor_stride,
self._anchor_offset)
num_anchors = anchors.num_boxes_static()
if num_anchors is None:
num_anchors = anchors.num_boxes()
anchor_indices = tf.zeros([num_anchors])
anchors.add_field('feature_map_index', anchor_indices)
return [anchors]
def tile_anchors(grid_height,
grid_width,
scales,
aspect_ratios,
base_anchor_size,
anchor_stride,
anchor_offset):
"""Create a tiled set of anchors strided along a grid in image space.
This op creates a set of anchor boxes by placing a "basis" collection of
boxes with user-specified scales and aspect ratios centered at evenly
distributed points along a grid. The basis collection is specified via the
scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2]
and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale
.1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2
and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before
placing it over its respective center.
Grid points are specified via grid_height, grid_width parameters as well as
the anchor_stride and anchor_offset parameters.
Args:
grid_height: size of the grid in the y direction (int or int scalar tensor)
grid_width: size of the grid in the x direction (int or int scalar tensor)
scales: a 1-d (float) tensor representing the scale of each box in the
basis set.
aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each
box in the basis set. The length of the scales and aspect_ratios tensors
must be equal.
base_anchor_size: base anchor size as [height, width]
(float tensor of shape [2])
anchor_stride: difference in centers between base anchors for adjacent grid
positions (float tensor of shape [2])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need some additional calculation if other
padding is used (float tensor of shape [2])
Returns:
a BoxList holding a collection of N anchor boxes
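  Example (an illustrative sketch with arbitrary values; a 2x2 grid with a
  single scale/aspect ratio pair yields 4 anchors):
    anchors = tile_anchors(
        grid_height=2, grid_width=2,
        scales=tf.constant([1.0]), aspect_ratios=tf.constant([1.0]),
        base_anchor_size=tf.constant([10.0, 10.0]),
        anchor_stride=tf.constant([19.0, 19.0]),
        anchor_offset=tf.constant([0.0, 0.0]))
    # anchors.get() has shape [2 * 2 * 1, 4].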
"""
ratio_sqrts = tf.sqrt(aspect_ratios)
heights = scales / ratio_sqrts * base_anchor_size[0]
widths = scales * ratio_sqrts * base_anchor_size[1]
# Get a grid of box centers
y_centers = tf.cast(tf.range(grid_height), dtype=tf.float32)
y_centers = y_centers * anchor_stride[0] + anchor_offset[0]
x_centers = tf.cast(tf.range(grid_width), dtype=tf.float32)
x_centers = x_centers * anchor_stride[1] + anchor_offset[1]
x_centers, y_centers = ops.meshgrid(x_centers, y_centers)
widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers)
heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers)
bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3)
bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3)
bbox_centers = tf.reshape(bbox_centers, [-1, 2])
bbox_sizes = tf.reshape(bbox_sizes, [-1, 2])
bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes)
return box_list.BoxList(bbox_corners)
def _center_size_bbox_to_corners_bbox(centers, sizes):
"""Converts bbox center-size representation to corners representation.
Args:
centers: a tensor with shape [N, 2] representing bounding box centers
sizes: a tensor with shape [N, 2] representing bounding boxes
Returns:
corners: tensor with shape [N, 4] representing bounding boxes in corners
representation
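  For example (illustrative): a box with center (2, 3) and size (4, 6) maps to
  the corners box (0, 0, 4, 6).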
"""
return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
| 9,212 | 42.051402 | 80 | py |
models | models-master/research/object_detection/anchor_generators/multiscale_grid_anchor_generator_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg
from object_detection.utils import test_case
class MultiscaleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_unit_dimensions(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 1.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 1
im_width = 1
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Positive offsets are produced.
exp_anchor_corners = [[0, 0, 32, 32],
[0, 32, 32, 64],
[32, 0, 64, 32],
[32, 32, 64, 64]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_normalized_anchors_fails_with_unit_dimensions(self):
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level=5, max_level=5, anchor_scale=1.0, aspect_ratios=[1.0],
scales_per_octave=1, normalize_coordinates=True)
with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'):
anchor_generator.generate(
feature_map_shape_list=[(2, 2)], im_height=1, im_width=1)
def test_construct_single_anchor_in_normalized_coordinates(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 128
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=True)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
[-48./64, -16./128, 80./64, 112./128],
[-16./64, -48./128, 112./64, 80./128],
[-16./64, -16./128, 112./64, 112./128]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_num_anchors_per_location(self):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 3
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])
def test_construct_single_anchor_dynamic_size(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = tf.constant(64)
im_width = tf.constant(64)
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-32, -64, 96, 64],
[-32, -32, 96, 96]]
# Add anchor offset.
anchor_offset = 2.0**5 / 2.0
exp_anchor_corners = [
[b + anchor_offset for b in a] for a in exp_anchor_corners
]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_odd_input_dimension(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 65
im_width = 65
feature_map_shape_list = [(3, 3)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
anchor_corners_out = self.execute(graph_fn, [])
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-64, 0, 64, 128],
[-32, -64, 96, 64],
[-32, -32, 96, 96],
[-32, 0, 96, 128],
[0, -64, 128, 64],
[0, -32, 128, 96],
[0, 0, 128, 128]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_on_two_feature_maps(self):
def graph_fn():
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2), (1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 2 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
    # There are 4 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect],
# [2**0.0 intermediate scale + 2.0 aspect],
# [2**0.5 intermediate scale + 2.0 aspect]]
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193],
[-58.50967, -149.0193, 122.50967, 213.0193],
[-96., -224., 160., 288.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
feature_map2_width):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(feature_map1_height, feature_map1_width),
(feature_map2_height, feature_map2_width)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(
self.execute_cpu(graph_fn, [
np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32),
np.array(1, dtype=np.int32),
np.array(1, dtype=np.int32)
]),
axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 12,490 | 39.423948 | 85 | py |
models | models-master/research/object_detection/anchor_generators/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/anchor_generators/grid_anchor_generator_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.grid_anchor_generator."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.utils import test_case
class GridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
"""Builds a 1x1 anchor grid to test the size of the output boxes."""
def graph_fn():
scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.25, 1.0, 4.0]
anchor_offset = [7, -3]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales, aspect_ratios, anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
[-505, -131, 519, 125], [-57, -67, 71, 61],
[-121, -131, 135, 125], [-249, -259, 263, 253],
[-25, -131, 39, 125], [-57, -259, 71, 253],
[-121, -515, 135, 509]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid(self):
def graph_fn():
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_anchor_grid_with_dynamic_feature_map_shapes(self):
def graph_fn(feature_map_height, feature_map_width):
base_anchor_size = [10, 10]
anchor_stride = [19, 19]
anchor_offset = [0, 0]
scales = [0.5, 1.0, 2.0]
aspect_ratios = [1.0]
anchor_generator = grid_anchor_generator.GridAnchorGenerator(
scales,
aspect_ratios,
base_anchor_size=base_anchor_size,
anchor_stride=anchor_stride,
anchor_offset=anchor_offset)
anchors_list = anchor_generator.generate(
feature_map_shape_list=[(feature_map_height, feature_map_width)])
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],
[-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],
[-5., 14., 5, 24], [-10., 9., 10, 29],
[16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],
[9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],
[14., 14., 24, 24], [9., 9., 29, 29]]
anchor_corners_out = self.execute_cpu(graph_fn,
[np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32)])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
| 4,519 | 42.047619 | 80 | py |
models | models-master/research/object_detection/models/bidirectional_feature_pyramid_generators_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bidirectional feature pyramid generators."""
import unittest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
@parameterized.parameters({'bifpn_num_iterations': 2},
{'bifpn_num_iterations': 8})
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class BiFPNFeaturePyramidGeneratorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
force_use_bias: true
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_get_expected_feature_map_shapes(self, bifpn_num_iterations):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
]
bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=128,
fpn_min_level=3,
fpn_max_level=7,
input_max_level=5,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False)
def graph_fn():
feature_maps = bifpn_generator(image_features)
return feature_maps
expected_feature_map_shapes = {
'{}_dn_lvl_3'.format(bifpn_num_iterations): (4, 16, 16, 128),
'{}_up_lvl_4'.format(bifpn_num_iterations): (4, 8, 8, 128),
'{}_up_lvl_5'.format(bifpn_num_iterations): (4, 4, 4, 128),
'{}_up_lvl_6'.format(bifpn_num_iterations): (4, 2, 2, 128),
'{}_up_lvl_7'.format(bifpn_num_iterations): (4, 1, 1, 128)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names(self, bifpn_num_iterations):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block3', tf.random_uniform([4, 16, 16, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32))
]
bifpn_generator = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=128,
fpn_min_level=3,
fpn_max_level=7,
input_max_level=5,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
name='bifpn')
def graph_fn():
return bifpn_generator(image_features)
self.execute(graph_fn, [], g)
expected_variables = [
'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_00/0_up_lvl_6/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_03/1_dn_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel',
'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
'bifpn/node_04/1_dn_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/bias',
'bifpn/node_05/1_dn_lvl_3/input_0_up_lvl_3/1x1_pre_sample/conv/kernel',
'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/bias',
'bifpn/node_06/1_up_lvl_4/input_0_up_lvl_4/1x1_pre_sample/conv/kernel',
'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/bias',
'bifpn/node_07/1_up_lvl_5/input_0_up_lvl_5/1x1_pre_sample/conv/kernel']
expected_node_variable_patterns = [
['bifpn/node_{:02}/{}_dn_lvl_6/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_6/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_5/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_5/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_4/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_4/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_dn_lvl_3/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_dn_lvl_3/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_4/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_4/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_5/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_5/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_6/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_6/post_combine/separable_conv/pointwise_kernel'],
['bifpn/node_{:02}/{}_up_lvl_7/combine/bifpn_combine_weights',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/bias',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/depthwise_kernel',
'bifpn/node_{:02}/{}_up_lvl_7/post_combine/separable_conv/pointwise_kernel']]
node_i = 2
for iter_i in range(1, bifpn_num_iterations+1):
for node_variable_patterns in expected_node_variable_patterns:
for pattern in node_variable_patterns:
expected_variables.append(pattern.format(node_i, iter_i))
node_i += 1
expected_variables = set(expected_variables)
actual_variable_set = set(
[var.name.split(':')[0] for var in bifpn_generator.variables])
self.assertSetEqual(expected_variables, actual_variable_set)
# TODO(aom): Tests for create_bifpn_combine_op.
if __name__ == '__main__':
tf.test.main()
| 8,350 | 48.708333 | 96 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet based Faster R-CNN implementation in Keras.
See Deep Residual Learning for Image Recognition by He et al.
https://arxiv.org/abs/1512.03385
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import model_util
_RESNET_MODEL_CONV4_LAST_LAYERS = {
'resnet_v1_50': 'conv4_block6_out',
'resnet_v1_101': 'conv4_block23_out',
'resnet_v1_152': 'conv4_block36_out',
}
class FasterRCNNResnetKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Resnet feature extractor implementation."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
      ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNResnetKerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self.classification_backbone = None
self._variable_dict = {}
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Resnet v1 network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_features = self.classification_backbone.get_layer(
name=conv4_last_layer).output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the ResNet v1
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_feature_maps = self.classification_backbone.get_layer(
name=conv4_last_layer).output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv5_block3_out').output
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
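# Illustrative usage sketch (not part of the original module): how the two
# sub-models built above are typically combined.  The image size and the
# placeholder tensors are assumptions made only for this example.
#
#   extractor = FasterRCNNResnet50KerasFeatureExtractor(is_training=False)
#   images = tf.zeros([1, 640, 640, 3])
#   preprocessed = extractor.preprocess(images)
#   rpn_model = extractor.get_proposal_feature_extractor_model(
#       name='FirstStageFeatureExtractor')
#   rpn_feature_map = rpn_model(preprocessed)  # conv4 features for the RPN.
#   box_model = extractor.get_box_classifier_feature_extractor_model(
#       name='SecondStageFeatureExtractor')
#   # box_model consumes ROI-aligned crops of rpn_feature_map and returns
#   # conv5 ("block 5") features for the box classifier.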
class FasterRCNNResnet50KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet101KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet152KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
| 9,440 | 36.023529 | 80 | py |
models | models-master/research/object_detection/models/ssd_spaghettinet_feature_extractor.py | """SpaghettiNet Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.python.training import moving_averages
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
IbnOp = collections.namedtuple(
'IbnOp', ['kernel_size', 'expansion_rate', 'stride', 'has_residual'])
SepConvOp = collections.namedtuple('SepConvOp',
['kernel_size', 'stride', 'has_residual'])
IbnFusedGrouped = collections.namedtuple(
'IbnFusedGrouped',
['kernel_size', 'expansion_rate', 'stride', 'groups', 'has_residual'])
SpaghettiStemNode = collections.namedtuple('SpaghettiStemNode',
['kernel_size', 'num_filters'])
SpaghettiNode = collections.namedtuple(
'SpaghettiNode', ['layers', 'num_filters', 'edges', 'level'])
SpaghettiResampleEdge = collections.namedtuple('SpaghettiResampleEdge',
['input'])
SpaghettiPassthroughEdge = collections.namedtuple('SpaghettiPassthroughEdge',
['input'])
SpaghettiNodeSpecs = collections.namedtuple('SpaghettiNodeSpecs',
['nodes', 'outputs'])
class SpaghettiNet():
"""SpaghettiNet."""
def __init__(self,
node_specs,
is_training=False,
use_native_resize_op=False,
use_explicit_padding=False,
activation_fn=tf.nn.relu6,
normalization_fn=slim.batch_norm,
name='spaghetti_node'):
self._node_specs = node_specs
self._is_training = is_training
self._use_native_resize_op = use_native_resize_op
self._use_explicit_padding = use_explicit_padding
self._activation_fn = activation_fn
self._normalization_fn = normalization_fn
self._name = name
self._nodes = {}
def _quant_var(self,
name,
initializer_val,
vars_collection=tf.GraphKeys.MOVING_AVERAGE_VARIABLES):
"""Create an var for storing the min/max quantization range."""
return slim.model_variable(
name,
shape=[],
initializer=tf.constant_initializer(initializer_val),
collections=[vars_collection],
trainable=False)
def _quantizable_concat(self,
inputs,
axis,
is_training,
is_quantized=True,
default_min=0,
default_max=6,
ema_decay=0.999,
scope='quantized_concat'):
"""Concat replacement with quantization option.
Allows concat inputs to share the same min max ranges,
from experimental/gazelle/synthetic/model/tpu/utils.py.
Args:
inputs: list of tensors to concatenate.
axis: dimension along which to concatenate.
is_training: true if the graph is a training graph.
is_quantized: flag to enable/disable quantization.
default_min: default min value for fake quant op.
default_max: default max value for fake quant op.
ema_decay: the moving average decay for the quantization variables.
scope: Optional scope for variable_scope.
Returns:
Tensor resulting from concatenation of input tensors
"""
if is_quantized:
with tf.variable_scope(scope):
min_var = self._quant_var('min', default_min)
max_var = self._quant_var('max', default_max)
if not is_training:
# If we are building an eval graph just use the values in the
# variables.
quant_inputs = [
tf.fake_quant_with_min_max_vars(t, min_var, max_var)
for t in inputs
]
else:
concat_tensors = tf.concat(inputs, axis=axis)
tf.logging.info('concat_tensors: {}'.format(concat_tensors))
# TFLite requires that 0.0 is always in the [min; max] range.
range_min = tf.minimum(
tf.reduce_min(concat_tensors), 0.0, name='SafeQuantRangeMin')
range_max = tf.maximum(
tf.reduce_max(concat_tensors), 0.0, name='SafeQuantRangeMax')
          # Otherwise, keep track of the moving averages of the min and max
          # of the elements of the input tensor.
min_val = moving_averages.assign_moving_average(
min_var, range_min, ema_decay, name='AssignMinEma')
max_val = moving_averages.assign_moving_average(
max_var, range_max, ema_decay, name='AssignMaxEma')
quant_inputs = [
tf.fake_quant_with_min_max_vars(t, min_val, max_val)
for t in inputs
]
outputs = tf.concat(quant_inputs, axis=axis)
else:
outputs = tf.concat(inputs, axis=axis)
return outputs
def _expanded_conv(self, net, num_filters, expansion_rates, kernel_size,
stride, scope):
"""Expanded convolution."""
expanded_num_filters = num_filters * expansion_rates
add_fixed_padding = self._use_explicit_padding and stride > 1
padding = 'VALID' if add_fixed_padding else 'SAME'
net = slim.conv2d(
net,
expanded_num_filters, [1, 1],
activation_fn=self._activation_fn,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/expansion')
net = slim.separable_conv2d(
ops.fixed_padding(net, kernel_size) if add_fixed_padding else net,
num_outputs=None,
kernel_size=kernel_size,
activation_fn=self._activation_fn,
normalizer_fn=self._normalization_fn,
stride=stride,
padding=padding,
scope=scope + '/depthwise')
net = slim.conv2d(
net,
num_filters, [1, 1],
activation_fn=tf.identity,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/projection')
return net
def _slice_shape_along_axis(self, shape, axis, groups):
"""Returns the shape after slicing into groups along the axis."""
if isinstance(shape, tf.TensorShape):
shape_as_list = shape.as_list()
if shape_as_list[axis] % groups != 0:
raise ValueError('Dimension {} must be divisible by {} groups'.format(
shape_as_list[axis], groups))
shape_as_list[axis] = shape_as_list[axis] // groups
return tf.TensorShape(shape_as_list)
elif isinstance(shape, tf.Tensor) and shape.shape.rank == 1:
shape_as_list = tf.unstack(shape)
shape_as_list[axis] = shape_as_list[axis] // groups
return tf.stack(shape_as_list)
else:
raise ValueError(
'Shape should be a TensorShape or rank-1 Tensor, but got: {}'.format(
shape))
def _ibn_fused_grouped(self, net, num_filters, expansion_rates, kernel_size,
stride, groups, scope):
"""Fused grouped IBN convolution."""
add_fixed_padding = self._use_explicit_padding and stride > 1
padding = 'VALID' if add_fixed_padding else 'SAME'
slice_shape = self._slice_shape_along_axis(net.shape, -1, groups)
slice_begin = [0] * net.shape.rank
slice_outputs = []
output_filters_per_group = net.shape[-1] // groups
expanded_num_filters_per_group = output_filters_per_group * expansion_rates
for idx in range(groups):
slice_input = tf.slice(net, slice_begin, slice_shape)
if isinstance(slice_shape, tf.TensorShape):
slice_begin[-1] += slice_shape.as_list()[-1]
else:
slice_begin[-1] += slice_shape[-1]
slice_outputs.append(
slim.conv2d(
ops.fixed_padding(slice_input, kernel_size)
if add_fixed_padding else slice_input,
expanded_num_filters_per_group,
kernel_size,
activation_fn=self._activation_fn,
normalizer_fn=self._normalization_fn,
stride=stride,
padding=padding,
scope='{}/{}_{}'.format(scope, 'slice', idx)))
# Make inputs to the concat share the same quantization variables.
net = self._quantizable_concat(
slice_outputs,
-1,
self._is_training,
scope='{}/{}'.format(scope, 'concat'))
net = slim.conv2d(
net,
num_filters, [1, 1],
activation_fn=tf.identity,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/projection')
return net
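  # Worked example (illustrative; numbers from the edgetpu-S spec below): a
  # 24-channel input with groups=3 and expansion_rate=8 is split into three
  # 8-channel slices, each fused kxk conv expands its slice to 8 * 8 = 64
  # channels, the concatenated 192 channels are then projected by the final
  # 1x1 conv down to `num_filters`.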
def _sep_conv(self, net, num_filters, kernel_size, stride, scope):
"""Depthwise Separable convolution."""
add_fixed_padding = self._use_explicit_padding and stride > 1
padding = 'VALID' if add_fixed_padding else 'SAME'
net = slim.separable_conv2d(
ops.fixed_padding(net, kernel_size) if add_fixed_padding else net,
num_outputs=None,
kernel_size=kernel_size,
activation_fn=None,
normalizer_fn=None,
stride=stride,
padding=padding,
scope=scope + '/depthwise')
net = slim.conv2d(
net,
num_filters, [1, 1],
activation_fn=self._activation_fn,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/pointwise')
return net
def _upsample(self, net, num_filters, upsample_ratio, scope):
"""Perform 1x1 conv then nearest neighbor upsampling."""
node_pre_up = slim.conv2d(
net,
num_filters, [1, 1],
activation_fn=tf.identity,
normalizer_fn=self._normalization_fn,
padding='SAME',
scope=scope + '/1x1_before_upsample')
if self._use_native_resize_op:
with tf.name_scope(scope + '/nearest_neighbor_upsampling'):
input_shape = shape_utils.combined_static_and_dynamic_shape(node_pre_up)
node_up = tf.image.resize_nearest_neighbor(
node_pre_up,
[input_shape[1] * upsample_ratio, input_shape[2] * upsample_ratio])
else:
node_up = ops.nearest_neighbor_upsampling(
node_pre_up, scale=upsample_ratio)
return node_up
def _downsample(self, net, num_filters, downsample_ratio, scope):
"""Perform maxpool downsampling then 1x1 conv."""
add_fixed_padding = self._use_explicit_padding and downsample_ratio > 1
padding = 'VALID' if add_fixed_padding else 'SAME'
node_down = slim.max_pool2d(
ops.fixed_padding(net, downsample_ratio +
1) if add_fixed_padding else net,
[downsample_ratio + 1, downsample_ratio + 1],
stride=[downsample_ratio, downsample_ratio],
padding=padding,
scope=scope + '/maxpool_downsampling')
node_after_down = slim.conv2d(
node_down,
num_filters, [1, 1],
activation_fn=tf.identity,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/1x1_after_downsampling')
return node_after_down
def _no_resample(self, net, num_filters, scope):
return slim.conv2d(
net,
num_filters, [1, 1],
activation_fn=tf.identity,
normalizer_fn=self._normalization_fn,
padding='SAME',
scope=scope + '/1x1_no_resampling')
def _spaghetti_node(self, node, scope):
"""Spaghetti node."""
node_spec = self._node_specs.nodes[node]
# Make spaghetti edges
edge_outputs = []
edge_min_level = 100 # Currently we don't have any level over 7.
edge_output_shape = None
for edge in node_spec.edges:
if isinstance(edge, SpaghettiPassthroughEdge):
assert len(node_spec.edges) == 1, len(node_spec.edges)
edge_outputs.append(self._nodes[edge.input])
elif isinstance(edge, SpaghettiResampleEdge):
edge_outputs.append(
self._spaghetti_edge(node, edge.input,
'edge_{}_{}'.format(edge.input, node)))
if edge_min_level > self._node_specs.nodes[edge.input].level:
edge_min_level = self._node_specs.nodes[edge.input].level
edge_output_shape = tf.shape(edge_outputs[-1])
else:
raise ValueError('Unknown edge type {}'.format(edge))
if len(edge_outputs) == 1:
      # A single edge output means this is a passthrough edge.
net = edge_outputs[-1]
else:
      # With multiple edge outputs, crop them to a common shape and sum them.
net = edge_outputs[0][:, :edge_output_shape[1], :edge_output_shape[2], :]
for edge_output in edge_outputs[1:]:
net += edge_output[:, :edge_output_shape[1], :edge_output_shape[2], :]
net = self._activation_fn(net)
# Make spaghetti node
for idx, layer_spec in enumerate(node_spec.layers):
if isinstance(layer_spec, IbnOp):
net_exp = self._expanded_conv(net, node_spec.num_filters,
layer_spec.expansion_rate,
layer_spec.kernel_size, layer_spec.stride,
'{}_{}'.format(scope, idx))
elif isinstance(layer_spec, IbnFusedGrouped):
net_exp = self._ibn_fused_grouped(net, node_spec.num_filters,
layer_spec.expansion_rate,
layer_spec.kernel_size,
layer_spec.stride, layer_spec.groups,
'{}_{}'.format(scope, idx))
elif isinstance(layer_spec, SepConvOp):
net_exp = self._sep_conv(net, node_spec.num_filters,
layer_spec.kernel_size, layer_spec.stride,
'{}_{}'.format(scope, idx))
else:
raise ValueError('Unsupported layer_spec: {}'.format(layer_spec))
# Skip connection for all layers other than the first in a node.
net = net_exp + net if layer_spec.has_residual else net_exp
self._nodes[node] = net
def _spaghetti_edge(self, curr_node, prev_node, scope):
"""Create an edge between curr_node and prev_node."""
curr_spec = self._node_specs.nodes[curr_node]
prev_spec = self._node_specs.nodes[prev_node]
if curr_spec.level < prev_spec.level:
# upsample
output = self._upsample(self._nodes[prev_node], curr_spec.num_filters,
2**(prev_spec.level - curr_spec.level), scope)
elif curr_spec.level > prev_spec.level:
# downsample
output = self._downsample(self._nodes[prev_node], curr_spec.num_filters,
2**(curr_spec.level - prev_spec.level), scope)
else:
# 1x1
output = self._no_resample(self._nodes[prev_node], curr_spec.num_filters,
scope)
return output
def _spaghetti_stem_node(self, net, node, scope):
stem_spec = self._node_specs.nodes[node]
kernel_size = stem_spec.kernel_size
padding = 'VALID' if self._use_explicit_padding else 'SAME'
self._nodes[node] = slim.conv2d(
ops.fixed_padding(net, kernel_size)
if self._use_explicit_padding else net,
stem_spec.num_filters, [kernel_size, kernel_size],
stride=2,
activation_fn=self._activation_fn,
normalizer_fn=self._normalization_fn,
padding=padding,
scope=scope + '/stem')
def apply(self, net, scope='spaghetti_net'):
"""Apply the SpaghettiNet to the input and return nodes in outputs."""
for node, node_spec in self._node_specs.nodes.items():
if isinstance(node_spec, SpaghettiStemNode):
self._spaghetti_stem_node(net, node, '{}/stem_node'.format(scope))
elif isinstance(node_spec, SpaghettiNode):
self._spaghetti_node(node, '{}/{}'.format(scope, node))
else:
raise ValueError('Unknown node {}: {}'.format(node, node_spec))
return [self._nodes[x] for x in self._node_specs.outputs]
def _spaghettinet_edgetpu_s():
"""Architecture definition for SpaghettiNet-EdgeTPU-S."""
nodes = collections.OrderedDict()
outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
nodes['n0'] = SpaghettiNode(
num_filters=48,
level=2,
layers=[
IbnFusedGrouped(3, 8, 2, 3, False),
],
edges=[SpaghettiPassthroughEdge(input='s0')])
nodes['n1'] = SpaghettiNode(
num_filters=64,
level=3,
layers=[
IbnFusedGrouped(3, 4, 2, 4, False),
IbnFusedGrouped(3, 4, 1, 4, True),
IbnFusedGrouped(3, 4, 1, 4, True),
],
edges=[SpaghettiPassthroughEdge(input='n0')])
nodes['n2'] = SpaghettiNode(
num_filters=72,
level=4,
layers=[
IbnOp(3, 8, 2, False),
IbnFusedGrouped(3, 8, 1, 4, True),
IbnOp(3, 8, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n1')])
nodes['n3'] = SpaghettiNode(
num_filters=88,
level=5,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(3, 8, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n2')])
nodes['n4'] = SpaghettiNode(
num_filters=88,
level=6,
layers=[
IbnOp(3, 8, 2, False),
SepConvOp(5, 1, True),
SepConvOp(5, 1, True),
SepConvOp(5, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n3')])
nodes['n5'] = SpaghettiNode(
num_filters=88,
level=7,
layers=[
SepConvOp(5, 2, False),
SepConvOp(3, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n4')])
nodes['c0n0'] = SpaghettiNode(
num_filters=144,
level=5,
layers=[
IbnOp(3, 4, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n3'),
SpaghettiResampleEdge(input='n4')
])
nodes['c0n1'] = SpaghettiNode(
num_filters=120,
level=4,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n2'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n2'] = SpaghettiNode(
num_filters=168,
level=5,
layers=[
IbnOp(3, 4, 1, False),
],
edges=[
SpaghettiResampleEdge(input='c0n1'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n3'] = SpaghettiNode(
num_filters=136,
level=6,
layers=[
IbnOp(3, 4, 1, False),
SepConvOp(3, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n4'] = SpaghettiNode(
num_filters=136,
level=7,
layers=[
IbnOp(3, 4, 1, False),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n5'] = SpaghettiNode(
num_filters=64,
level=8,
layers=[
SepConvOp(3, 1, False),
SepConvOp(3, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='c0n4')])
node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
return node_specs
def _spaghettinet_edgetpu_m():
"""Architecture definition for SpaghettiNet-EdgeTPU-M."""
nodes = collections.OrderedDict()
outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
nodes['n0'] = SpaghettiNode(
num_filters=48,
level=2,
layers=[
IbnFusedGrouped(3, 8, 2, 3, False),
],
edges=[SpaghettiPassthroughEdge(input='s0')])
nodes['n1'] = SpaghettiNode(
num_filters=64,
level=3,
layers=[
IbnFusedGrouped(3, 8, 2, 4, False),
IbnFusedGrouped(3, 4, 1, 4, True),
IbnFusedGrouped(3, 4, 1, 4, True),
IbnFusedGrouped(3, 4, 1, 4, True),
],
edges=[SpaghettiPassthroughEdge(input='n0')])
nodes['n2'] = SpaghettiNode(
num_filters=72,
level=4,
layers=[
IbnOp(3, 8, 2, False),
IbnFusedGrouped(3, 8, 1, 4, True),
IbnOp(3, 8, 1, True),
IbnOp(3, 8, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n1')])
nodes['n3'] = SpaghettiNode(
num_filters=96,
level=5,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(3, 8, 1, True),
IbnOp(3, 8, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n2')])
nodes['n4'] = SpaghettiNode(
num_filters=104,
level=6,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(3, 4, 1, True),
SepConvOp(5, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n3')])
nodes['n5'] = SpaghettiNode(
num_filters=56,
level=7,
layers=[
SepConvOp(5, 2, False),
SepConvOp(3, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n4')])
nodes['c0n0'] = SpaghettiNode(
num_filters=152,
level=5,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n3'),
SpaghettiResampleEdge(input='n4')
])
nodes['c0n1'] = SpaghettiNode(
num_filters=120,
level=4,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n2'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n2'] = SpaghettiNode(
num_filters=168,
level=5,
layers=[
IbnOp(3, 4, 1, False),
SepConvOp(3, 1, True),
],
edges=[
SpaghettiResampleEdge(input='c0n1'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n3'] = SpaghettiNode(
num_filters=136,
level=6,
layers=[
SepConvOp(3, 1, False),
SepConvOp(3, 1, True),
SepConvOp(3, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n4'] = SpaghettiNode(
num_filters=136,
level=7,
layers=[
IbnOp(3, 4, 1, False),
SepConvOp(5, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n5'] = SpaghettiNode(
num_filters=64,
level=8,
layers=[
SepConvOp(3, 1, False),
SepConvOp(3, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='c0n4')])
node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
return node_specs
def _spaghettinet_edgetpu_l():
"""Architecture definition for SpaghettiNet-EdgeTPU-L."""
nodes = collections.OrderedDict()
outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
nodes['n0'] = SpaghettiNode(
num_filters=48,
level=2,
layers=[
IbnFusedGrouped(3, 8, 2, 3, False),
],
edges=[SpaghettiPassthroughEdge(input='s0')])
nodes['n1'] = SpaghettiNode(
num_filters=64,
level=3,
layers=[
IbnFusedGrouped(3, 8, 2, 4, False),
IbnFusedGrouped(3, 8, 1, 4, True),
IbnFusedGrouped(3, 8, 1, 4, True),
IbnFusedGrouped(3, 4, 1, 4, True),
],
edges=[SpaghettiPassthroughEdge(input='n0')])
nodes['n2'] = SpaghettiNode(
num_filters=80,
level=4,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(3, 8, 1, True),
IbnOp(3, 8, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n1')])
nodes['n3'] = SpaghettiNode(
num_filters=104,
level=5,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(3, 8, 1, True),
IbnOp(3, 8, 1, True),
IbnOp(3, 8, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n2')])
nodes['n4'] = SpaghettiNode(
num_filters=88,
level=6,
layers=[
IbnOp(3, 8, 2, False),
IbnOp(5, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 8, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n3')])
nodes['n5'] = SpaghettiNode(
num_filters=56,
level=7,
layers=[
IbnOp(5, 4, 2, False),
SepConvOp(5, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='n4')])
nodes['c0n0'] = SpaghettiNode(
num_filters=160,
level=5,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n3'),
SpaghettiResampleEdge(input='n4')
])
nodes['c0n1'] = SpaghettiNode(
num_filters=120,
level=4,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 8, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n2'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n2'] = SpaghettiNode(
num_filters=168,
level=5,
layers=[
IbnOp(3, 4, 1, False),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='c0n1'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n3'] = SpaghettiNode(
num_filters=112,
level=6,
layers=[
IbnOp(3, 8, 1, False),
IbnOp(3, 4, 1, True),
SepConvOp(3, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n4'] = SpaghettiNode(
num_filters=128,
level=7,
layers=[
IbnOp(3, 4, 1, False),
IbnOp(3, 4, 1, True),
],
edges=[
SpaghettiResampleEdge(input='n5'),
SpaghettiResampleEdge(input='c0n0')
])
nodes['c0n5'] = SpaghettiNode(
num_filters=64,
level=8,
layers=[
SepConvOp(5, 1, False),
SepConvOp(5, 1, True),
],
edges=[SpaghettiPassthroughEdge(input='c0n4')])
node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
return node_specs
def lookup_spaghetti_arch(arch):
"""Lookup table for the nodes structure for spaghetti nets."""
if arch == 'spaghettinet_edgetpu_s':
return _spaghettinet_edgetpu_s()
elif arch == 'spaghettinet_edgetpu_m':
return _spaghettinet_edgetpu_m()
elif arch == 'spaghettinet_edgetpu_l':
return _spaghettinet_edgetpu_l()
else:
raise ValueError('Unknown architecture {}'.format(arch))
class SSDSpaghettinetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using Custom Architecture."""
def __init__(
self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
spaghettinet_arch_name='spaghettinet_edgetpu_m',
use_explicit_padding=False,
reuse_weights=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
):
"""SSD FPN feature extractor based on Mobilenet v2 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: Not used in SpaghettiNet.
min_depth: Not used in SpaghettiNet.
pad_to_multiple: Not used in SpaghettiNet.
conv_hyperparams_fn: Not used in SpaghettiNet.
spaghettinet_arch_name: name of the specific architecture.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
reuse_weights: Not used in SpaghettiNet.
use_depthwise: Not used in SpaghettiNet.
override_base_feature_extractor_hyperparams: Not used in SpaghettiNet.
"""
super(SSDSpaghettinetFeatureExtractor, self).__init__(
is_training=is_training,
use_explicit_padding=use_explicit_padding,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams
)
self._spaghettinet_arch_name = spaghettinet_arch_name
    self._use_native_resize_op = not is_training
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
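    # The affine map above sends pixel value 0 to -1.0, 127.5 to 0.0 and 255
    # to +1.0.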
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
nodes_dict = lookup_spaghetti_arch(self._spaghettinet_arch_name)
with tf.variable_scope(
self._spaghettinet_arch_name, reuse=self._reuse_weights):
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.03),
weights_regularizer=slim.l2_regularizer(1e-5)):
with slim.arg_scope([slim.separable_conv2d],
weights_initializer=tf.truncated_normal_initializer(
mean=0.0, stddev=0.03),
weights_regularizer=slim.l2_regularizer(1e-5)):
with slim.arg_scope([slim.batch_norm],
is_training=self._is_training,
epsilon=0.001,
decay=0.97,
center=True,
scale=True):
spaghetti_net = SpaghettiNet(
node_specs=nodes_dict,
is_training=self._is_training,
use_native_resize_op=self._use_native_resize_op,
use_explicit_padding=self._use_explicit_padding,
name=self._spaghettinet_arch_name)
feature_maps = spaghetti_net.apply(preprocessed_inputs)
return feature_maps
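# Illustrative usage sketch (not part of the original module).  The input size
# is an assumption; `conv_hyperparams_fn` is unused by this extractor (see the
# constructor docstring), so a placeholder value is passed here.
#
#   extractor = SSDSpaghettinetFeatureExtractor(
#       is_training=False,
#       depth_multiplier=1.0,
#       min_depth=16,
#       pad_to_multiple=1,
#       conv_hyperparams_fn=None,
#       spaghettinet_arch_name='spaghettinet_edgetpu_s')
#   images = tf.zeros([1, 320, 320, 3])
#   feature_maps = extractor.extract_features(extractor.preprocess(images))
#   # One feature map per output node of the architecture ('c0n1' .. 'c0n5').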
| 31,260 | 33.889509 | 95 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1.
See https://arxiv.org/abs/1708.02002 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
class SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD FPN feature extractor based on Resnet v1 architecture."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
fpn_scope_name,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
resnet_base_fn: base resnet network to use.
resnet_scope_name: scope name under which to construct resnet
fpn_scope_name: scope name under which to construct the feature pyramid
network.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
FPN. FPN constructions uses features maps starting from fpn_min_level
upto the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: On supplying invalid arguments for unused arguments.
"""
super(SSDResnetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if self._use_explicit_padding is True:
raise ValueError('Explicit padding is not a valid option.')
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._fpn_scope_name = fpn_scope_name
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _filter_features(self, image_features):
# TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
# of munging the scope here.
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block1', 'block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
min_base_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
image_features = self._filter_features(image_features)
depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope(self._fpn_scope_name,
reuse=self._reuse_weights):
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=depth_fn(self._additional_layer_depth),
use_native_resize_op=self._use_native_resize_op)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(
fpn_features['top_down_block{}'.format(level - 1)])
last_feature_map = fpn_features['top_down_block{}'.format(
base_fpn_max_level - 1)]
# Construct coarse features
for i in range(base_fpn_max_level, self._fpn_max_level):
last_feature_map = slim.conv2d(
last_feature_map,
num_outputs=depth_fn(self._additional_layer_depth),
kernel_size=[3, 3],
stride=2,
padding='SAME',
scope='bottom_up_block{}'.format(i))
feature_maps.append(last_feature_map)
return feature_maps
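  # For example, with the default fpn_min_level=3 / fpn_max_level=7 and a
  # 256x256 input this yields five feature maps with strides 8, 16, 32, 64 and
  # 128, i.e. spatial sizes 32x32, 16x16, 8x8, 4x4 and 2x2 (see the shape
  # expectations in the SSD Resnet v1 FPN feature extractor tests).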
class SSDResnet50V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet50 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet50 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_50,
'resnet_v1_50',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class SSDResnet101V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet101 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet101 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_101,
'resnet_v1_101',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class SSDResnet152V1FpnFeatureExtractor(SSDResnetV1FpnFeatureExtractor):
"""SSD Resnet152 V1 FPN feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD Resnet152 V1 FPN feature extractor based on Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False. UNUSED currently.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet152V1FpnFeatureExtractor, self).__init__(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_v1.resnet_v1_152,
'resnet_v1_152',
'fpn',
fpn_min_level,
fpn_max_level,
additional_layer_depth,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
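# Illustrative usage sketch (not part of the original module).  The input size
# is an assumption and `conv_hyperparams_fn` stands in for a callable that
# returns a slim arg_scope for the extra convolutional layers.
#
#   extractor = SSDResnet50V1FpnFeatureExtractor(
#       is_training=False,
#       depth_multiplier=1.0,
#       min_depth=32,
#       pad_to_multiple=1,
#       conv_hyperparams_fn=conv_hyperparams_fn)
#   images = tf.zeros([2, 256, 256, 3])
#   feature_maps = extractor.extract_features(extractor.preprocess(images))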
| 16,979 | 42.42711 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_testbase.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.utils import test_utils
class SSDResnetFPNFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Helper test class for SSD Resnet v1 FPN feature extractors."""
@abc.abstractmethod
def _resnet_scope_name(self):
pass
@abc.abstractmethod
def _fpn_scope_name(self):
return 'fpn'
@abc.abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
min_depth=32,
use_keras=False):
pass
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_depth_multiplier(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5
expected_num_channels = int(256 * depth_multiplier)
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, expected_num_channels),
(2, 16, 16, expected_num_channels),
(2, 8, 8, expected_num_channels),
(2, 4, 4, expected_num_channels),
(2, 2, 2, expected_num_channels)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_returns_correct_shapes_with_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
min_depth = 320
expected_feature_map_shape = [(2, 32, 32, min_depth),
(2, 16, 16, min_depth),
(2, 8, 8, min_depth),
(2, 4, 4, min_depth),
(2, 2, 2, min_depth)]
with test_utils.GraphContextOrNone() as g:
image_tensor = tf.random.uniform([2, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, min_depth=min_depth,
use_keras=self.is_tf2())
def graph_fn():
if self.is_tf2():
return feature_extractor(image_tensor)
return feature_extractor.extract_features(image_tensor)
feature_maps = self.execute(graph_fn, [], graph=g)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shape):
self.assertAllEqual(feature_map.shape, expected_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 254
image_width = 254
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=self.is_tf2())
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=self.is_tf2())
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image_np = np.random.rand(4, image_height, image_width, 3)
with test_utils.GraphContextOrNone() as g:
test_image = tf.constant(test_image_np)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=self.is_tf2())
def graph_fn():
preprocessed_image = feature_extractor.preprocess(test_image)
return preprocessed_image
preprocessed_image_out = self.execute(graph_fn, [], graph=g)
self.assertAllClose(preprocessed_image_out,
test_image_np - [[123.68, 116.779, 103.939]])
def test_variables_only_created_in_scope(self):
if self.is_tf2():
self.skipTest('test_variables_only_created_in_scope is only tf1')
depth_multiplier = 1
pad_to_multiple = 1
scope_name = self._resnet_scope_name()
self.check_feature_extractor_variables_under_scope(
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=self.is_tf2())
def test_variable_count(self):
if self.is_tf2():
self.skipTest('test_variable_count is only tf1')
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=self.is_tf2())
# The number of expected variables in resnet_v1_50, resnet_v1_101,
# and resnet_v1_152 is 279, 534, and 789 respectively.
expected_variables_len = 279
scope_name = self._resnet_scope_name()
if scope_name in ('ResNet101V1_FPN', 'resnet_v1_101'):
expected_variables_len = 534
elif scope_name in ('ResNet152V1_FPN', 'resnet_v1_152'):
expected_variables_len = 789
self.assertEqual(len(variables), expected_variables_len)
| 7,538 | 38.062176 | 80 | py |
models | models-master/research/object_detection/models/ssd_inception_v3_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for InceptionV3 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import inception_v3
class SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using InceptionV3 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""InceptionV3 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: If `override_base_feature_extractor_hyperparams` is False.
"""
super(SSDInceptionV3FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if not self._override_base_feature_extractor_hyperparams:
      raise ValueError('SSD Inception V3 feature extractor always uses '
'scope returned by `conv_hyperparams_fn` for both the '
'base feature extractor and the additional layers '
'added since there is no arg_scope defined for the base '
'feature extractor.')
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
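  # Worked example for the mapping above: (2.0 / 255.0) * 0.0 - 1.0 == -1.0,
  # (2.0 / 255.0) * 127.5 - 1.0 == 0.0, and (2.0 / 255.0) * 255.0 - 1.0 == 1.0,
  # so [0, 255] pixel values are rescaled into the [-1, 1] range.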
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''
][:self._num_layers],
'layer_depth': [-1, -1, -1, 512, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope:
_, image_features = inception_v3.inception_v3_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Mixed_7c',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
| 5,557 | 39.569343 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobileNetEdgeTPU features."""
from object_detection.models import ssd_mobilenet_v3_feature_extractor
from nets.mobilenet import mobilenet_v3
class SSDMobileNetEdgeTPUFeatureExtractor(
ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3FeatureExtractorBase):
"""MobileNetEdgeTPU feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobilenetEdgeTPU'):
super(SSDMobileNetEdgeTPUFeatureExtractor, self).__init__(
conv_defs=mobilenet_v3.V3_EDGETPU,
from_layer=['layer_18/expansion_output', 'layer_23'],
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=
        override_base_feature_extractor_hyperparams,
scope_name=scope_name
)
| 2,048 | 39.98 | 96 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV2 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
class SSDMobileNetV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""MobileNetV2 Feature Extractor for SSD Models.
Mobilenet v2 (experimental), designed by sandler@. More details can be found
in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDMobileNetV2FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', ''
][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
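    # Reading the layout above with the default num_layers=6: the first two
    # feature maps come straight from the MobileNetV2 backbone
    # ('layer_15/expansion_output' and 'layer_19'; a layer_depth of -1 means
    # "keep that layer's own depth"), while the remaining four maps are newly
    # created stride-2 convolutional layers with depths 512, 256, 256 and 128.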
with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \
slim.arg_scope(
[mobilenet.depth_multiplier], min_depth=self._min_depth):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = mobilenet_v2.mobilenet_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='layer_19',
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
| 5,758 | 40.135714 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v3_feature_extractor_testbase.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class for ssd_mobilenet_v3_feature_extractor."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
class _SsdMobilenetV3FeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Base class for MobilenetV3 tests."""
@abc.abstractmethod
def _get_input_sizes(self):
"""Return feature map sizes for the two inputs to SSD head."""
pass
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]),
                                  (2, 4, 4, input_feature_sizes[1]),
                                  (2, 2, 2, 512), (2, 1, 1, 256),
                                  (2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
| 4,543 | 39.212389 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_pnas_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PNASNet Faster R-CNN implementation.
Based on PNASNet model: https://arxiv.org/abs/1712.00559
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import variables_helper
from nets.nasnet import nasnet_utils
try:
from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
arg_scope = slim.arg_scope
def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
"""Defines the default arg scope for the PNASNet Large for object detection.
This provides a small edit to switch batch norm training on and off.
Args:
is_batch_norm_training: Boolean indicating whether to train with batch norm.
Returns:
An `arg_scope` to use for the PNASNet Large Model.
"""
imagenet_scope = pnasnet.pnasnet_large_arg_scope()
with arg_scope(imagenet_scope):
with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc:
return sc
def _filter_scaling(reduction_indices, start_cell_num):
"""Compute the expected filter scaling at given PNASNet cell start_cell_num.
In the pnasnet.py code, filter_scaling starts at 1.0. We instead
adapt filter scaling to depend on the starting cell.
  In the first cells, before any reduction, filter_scaling is 1.0. After
  passing each reduction cell, filter_scaling is multiplied by 2.
Args:
reduction_indices: list of int indices.
start_cell_num: int.
Returns:
filter_scaling: float.
"""
filter_scaling = 1.0
for ind in reduction_indices:
if ind < start_cell_num:
filter_scaling *= 2.0
return filter_scaling
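# A short worked example for _filter_scaling: with reduction_indices=[4, 8]
# and start_cell_num=6, only the reduction at index 4 lies before the start
# cell, so the returned filter_scaling is 2.0; with start_cell_num=10 both
# reductions are passed and the result is 4.0. (These index values are
# illustrative, not taken from a specific PNASNet configuration.)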
# Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but
# with special edits to remove instantiation of the stem and the special
# ability to receive as input a pair of hidden states. It constructs only
# a sub-network from the original PNASNet model, starting from the
# start_cell_num cell and with modified final layer.
def _build_pnasnet_base(
hidden_previous, hidden, normal_cell, hparams, true_cell_num,
start_cell_num):
"""Constructs a PNASNet image model for proposal classifier features."""
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
filter_scaling = _filter_scaling(reduction_indices, start_cell_num)
# Note: The None is prepended to match the behavior of _imagenet_stem()
cell_outputs = [None, hidden_previous, hidden]
net = hidden
# Run the cells
for cell_num in range(start_cell_num, hparams.num_cells):
is_reduction = cell_num in reduction_indices
stride = 2 if is_reduction else 1
if is_reduction: filter_scaling *= hparams.filter_scaling_rate
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
true_cell_num += 1
cell_outputs.append(net)
# Final nonlinearity.
# Note that we have dropped the final pooling, dropout and softmax layers
# from the default pnasnet version.
with tf.variable_scope('final_layer'):
net = tf.nn.relu(net)
return net
# TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet
# featurization. The reason for this is that pnasnet.py only supports
# inputs with fully known shapes. We need to update pnasnet.py to handle
# shapes not known at compile time.
class FasterRCNNPNASFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN with PNASNet feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNPNASFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN with PNAS preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Extracts features using the first half of the PNASNet network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
end_points: A dictionary mapping feature extractor tensor names to tensors
Raises:
ValueError: If the created network is missing the required activation.
"""
del scope
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
with slim.arg_scope(pnasnet_large_arg_scope_for_detection(
is_batch_norm_training=self._train_batch_norm)):
with arg_scope([slim.conv2d,
slim.batch_norm,
slim.separable_conv2d],
reuse=self._reuse_weights):
_, end_points = pnasnet.build_pnasnet_large(
preprocessed_inputs, num_classes=None,
is_training=self._is_training,
final_endpoint='Cell_7')
# Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160.
# Cell_7 is the last cell before second reduction.
rpn_feature_map = tf.concat([end_points['Cell_6'],
end_points['Cell_7']], 3)
# pnasnet.py does not maintain the batch size in the first dimension.
    # This workaround lets us retain the batch size for the set_shape below.
batch = preprocessed_inputs.get_shape().as_list()[0]
shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
rpn_feature_map_shape = [batch] + shape_without_batch
rpn_feature_map.set_shape(rpn_feature_map_shape)
return rpn_feature_map, end_points
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
This function reconstructs the "second half" of the PNASNet
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
del scope
# Number of used stem cells.
num_stem_cells = 2
# Note that we always feed into 2 layers of equal depth
# where the first N channels corresponds to previous hidden layer
# and the second N channels correspond to the final hidden layer.
hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3)
# Note that what follows is largely a copy of build_pnasnet_large() within
# pnasnet.py. We are copying to minimize code pollution in slim.
# TODO(shlens,skornblith): Determine the appropriate drop path schedule.
# For now the schedule is the default (1.0->0.7 over 250,000 train steps).
hparams = pnasnet.large_imagenet_config()
if not self._is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
# Calculate the total number of cells in the network
total_num_cells = hparams.num_cells + num_stem_cells
normal_cell = pnasnet.PNasNetNormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps)
with arg_scope([slim.dropout, nasnet_utils.drop_path],
is_training=self._is_training):
with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
# This corresponds to the cell number just past 'Cell_7' used by
# _extract_proposal_features().
start_cell_num = 8
true_cell_num = start_cell_num + num_stem_cells
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net = _build_pnasnet_base(
hidden_previous,
hidden,
normal_cell=normal_cell,
hparams=hparams,
true_cell_num=true_cell_num,
start_cell_num=start_cell_num)
proposal_classifier_features = net
return proposal_classifier_features
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
PNASNet checkpoints.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
if variable.op.name.startswith(
first_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
first_stage_feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
if variable.op.name.startswith(
second_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
second_stage_feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
return variables_to_restore
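  # Illustrative mapping for the method above (the variable name below is
  # hypothetical): a graph variable named
  # 'FirstStageFeatureExtractor/cell_stem_0/conv/weights' would be restored
  # from checkpoint key 'cell_stem_0/conv/weights/ExponentialMovingAverage'.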
| 12,550 | 37.148936 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnInceptionResnetV2KerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 19, 19, 1088])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1088])
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
self.assertAllEqual(features_shape.numpy(), [2, 9, 9, 1536])
if __name__ == '__main__':
tf.test.main()
| 3,528 | 42.567901 | 108 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing mobilenet_v2+FPN feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_mobilenet_v2_fpn_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMobileNetV2FPNFeatureExtractorTest(test_case.TestCase):
def test_center_net_mobilenet_v2_fpn_feature_extractor(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering,
use_separable_conv=False))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Pull out the FPN network.
output = model.get_layer('model_1')
for layer in output.layers:
# All convolution layers should be normal 2D convolutions.
if 'conv' in layer.name:
self.assertIsInstance(layer, tf.keras.layers.Conv2D)
def test_center_net_mobilenet_v2_fpn_feature_extractor_sep_conv(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Pull out the FPN network.
backbone = model.get_layer('model')
first_conv = backbone.get_layer('Conv1')
self.assertEqual(32, first_conv.filters)
# Pull out the FPN network.
output = model.get_layer('model_1')
for layer in output.layers:
# Convolution layers with kernel size not equal to (1, 1) should be
# separable 2D convolutions.
if 'conv' in layer.name and layer.kernel_size != (1, 1):
self.assertIsInstance(layer, tf.keras.layers.SeparableConv2D)
def test_center_net_mobilenet_v2_fpn_feature_extractor_depth_multiplier(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True,
depth_multiplier=2.0))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
    # Pull out the backbone network.
backbone = model.get_layer('model')
first_conv = backbone.get_layer('Conv1')
# Note that the first layer typically has 32 filters, but this model has
# a depth multiplier of 2.
self.assertEqual(64, first_conv.filters)
def test_center_net_mobilenet_v2_fpn_feature_extractor_interpolation(self):
channel_means = (0., 0., 0.)
channel_stds = (1., 1., 1.)
bgr_ordering = False
model = (
center_net_mobilenet_v2_fpn_feature_extractor.mobilenet_v2_fpn(
channel_means, channel_stds, bgr_ordering, use_separable_conv=True,
upsampling_interpolation='bilinear'))
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 24))
# Verify the upsampling layers in the FPN use 'bilinear' interpolation.
fpn = model.get_layer('model_1')
for layer in fpn.layers:
if 'up_sampling2d' in layer.name:
self.assertEqual('bilinear', layer.interpolation)
if __name__ == '__main__':
tf.test.main()
| 4,896 | 35.819549 | 81 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for faster_rcnn_inception_v2_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as faster_rcnn_inception_v2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FasterRcnnInceptionV2FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return faster_rcnn_inception_v2.FasterRCNNInceptionV2FeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 576])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 576])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 576])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_on_very_small_images(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
features_shape,
feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)})
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[3, 14, 14, 576], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [3, 7, 7, 1024])
if __name__ == '__main__':
tf.test.main()
| 5,420 | 41.023256 | 106 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based MobilenetV1 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v1
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
from object_detection.utils import shape_utils
# A modified config of mobilenet v1 that makes it more detection friendly.
def _create_modified_mobilenet_config():
conv_def_block_12 = model_utils.ConvDefs(conv_name='conv_pw_12', filters=512)
conv_def_block_13 = model_utils.ConvDefs(conv_name='conv_pw_13', filters=256)
return [conv_def_block_12, conv_def_block_13]
class SSDMobileNetV1FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based MobilenetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Mobilenet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v1 layers
{Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise,
Conv2d_13_pointwise}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
FPN. FPN constructions uses features maps starting from fpn_min_level
upto the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: whether to use depthwise convolutions. Default is False.
      use_native_resize_op: Whether to use tf.image.resize_nearest_neighbor
        to do upsampling in FPN. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
self._feature_blocks = [
'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
'Conv2d_13_pointwise'
]
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_mobilenet_v1 = mobilenet_v1.mobilenet_v1(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
conv_defs=self._conv_defs,
include_top=False)
conv2d_3_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_3_relu').output
conv2d_5_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_5_relu').output
conv2d_11_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_11_relu').output
conv2d_13_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_13_relu').output
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v1.inputs,
outputs=[conv2d_3_pointwise, conv2d_5_pointwise,
conv2d_11_pointwise, conv2d_13_pointwise]
)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
stride = 2
for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
coarse_feature_layers = []
if self._use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
coarse_feature_layers.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'bottom_up_Conv2d_{}'.format(
i - self._base_fpn_max_level + 13)
conv_block = feature_map_generators.create_conv_block(
self._use_depthwise, kernel_size, padding, stride, layer_name,
self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
self._depth_fn(self._additional_layer_depth))
coarse_feature_layers.extend(conv_block)
self._coarse_feature_layers.append(coarse_feature_layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append(self._feature_blocks[level - 2])
feature_start_index = len(self._feature_blocks) - self._num_levels
fpn_input_image_features = [
(key, image_features[feature_start_index + index])
for index, key in enumerate(feature_block_list)]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
self._feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
self._feature_blocks[self._base_fpn_max_level - 2])]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
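  # With the default fpn_min_level=3 and fpn_max_level=7, the list returned
  # above contains five maps: FPN outputs built on Conv2d_5_pointwise,
  # Conv2d_11_pointwise and Conv2d_13_pointwise, followed by two extra
  # stride-2 "bottom up" convolution blocks for levels 6 and 7.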
def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
"""Returns a map for restoring from an (object-based) checkpoint.
Args:
feature_extractor_scope: A scope name for the feature extractor (unused).
Returns:
A dict mapping keys to Keras models
"""
return {'feature_extractor': self.classification_backbone}
| 11,195 | 42.734375 | 80 | py |
models | models-master/research/object_detection/models/feature_map_generators.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate a list of feature maps based on image features.
Provides several feature map generators that can be used to build object
detection feature extractors.
Object detection feature extractors are usually built by stacking two
components: a base feature extractor such as Inception V3 and a feature map
generator.
Feature map generators build on the base feature extractors and produce a list
of final feature maps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.utils import ops
from object_detection.utils import shape_utils
# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0
def get_depth_fn(depth_multiplier, min_depth):
"""Builds a callable to compute depth (output channels) of conv filters.
Args:
depth_multiplier: a multiplier for the nominal depth.
min_depth: a lower bound on the depth of filters.
Returns:
A callable that takes in a nominal depth and returns the depth to use.
"""
def multiply_depth(depth):
new_depth = int(depth * depth_multiplier)
return max(new_depth, min_depth)
return multiply_depth
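# Illustrative sketch (not part of the original module): how the callable
# returned by get_depth_fn behaves. The multiplier and floor values below are
# assumptions chosen only for this example.
def _example_get_depth_fn_usage():
  depth_fn = get_depth_fn(depth_multiplier=0.5, min_depth=16)
  assert depth_fn(64) == 32  # 64 * 0.5 = 32, above the min_depth floor.
  assert depth_fn(24) == 16  # 24 * 0.5 = 12, clamped up to min_depth.
  return depth_fn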
def create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams,
is_training, freeze_batchnorm, depth):
"""Create Keras layers for depthwise & non-depthwise convolutions.
Args:
use_depthwise: Whether to use depthwise separable conv instead of regular
conv.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters. Can be an int if both values are the same.
padding: One of 'VALID' or 'SAME'.
stride: A list of length 2: [stride_height, stride_width], specifying the
convolution stride. Can be an int if both strides are the same.
layer_name: String. The name of the layer.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
depth: Depth of output feature maps.
Returns:
A list of conv layers.
"""
layers = []
if use_depthwise:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
depth, [kernel_size, kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**kwargs))
else:
layers.append(tf.keras.layers.Conv2D(
depth,
[kernel_size, kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
return layers
class KerasMultiResolutionFeatureMaps(tf.keras.Model):
"""Generates multi resolution feature maps from input image features.
A Keras model that generates multi-scale feature maps for detection as in the
SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, when called on inputs it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
When this feature generator object is called on input image_features:
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
def __init__(self,
feature_map_layout,
depth_multiplier,
min_depth,
insert_1x1_conv,
is_training,
conv_hyperparams,
freeze_batchnorm,
name=None):
"""Constructor.
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
        as a box predictor layer, and the layer_depth is directly inferred from
the feature map (instead of using the provided 'layer_depth' parameter).
In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution
operations. Note that the current implementation only supports
generating new layers using convolutions of stride 2 (resulting in a
spatial resolution reduction by a factor of 2), and will be extended to
a more flexible design. Convolution kernel size is set to 3 by default,
        and can be customized by 'conv_kernel_size' parameter (similarly,
'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
The created convolution operation will be a normal 2D convolution by
default, and a depthwise convolution followed by 1x1 convolution if
'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1
convolution should be inserted before shrinking the feature map.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)
self.feature_map_layout = feature_map_layout
self.convolutions = []
depth_fn = get_depth_fn(depth_multiplier, min_depth)
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
net = []
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
base_from_layer = from_layer
else:
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth // 2))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth // 2),
[1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
# We define this function here while capturing the value of
# conv_kernel_size, to avoid holding a reference to the loop variable
# conv_kernel_size inside of a lambda function
def fixed_padding(features, kernel_size=conv_kernel_size):
return ops.fixed_padding(features, kernel_size)
net.append(tf.keras.layers.Lambda(fixed_padding))
# TODO(rathodv): Add some utilities to simplify the creation of
# Depthwise & non-depthwise convolutions w/ normalization & activations
if use_depthwise:
net.append(tf.keras.layers.DepthwiseConv2D(
[conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_depthwise_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name + '_depthwise'))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
else:
net.append(tf.keras.layers.Conv2D(
depth_fn(layer_depth),
[conv_kernel_size, conv_kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self.convolutions.append(net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = []
feature_map_keys = []
for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
if from_layer:
feature_map = image_features[from_layer]
feature_map_keys.append(from_layer)
else:
feature_map = feature_maps[-1]
for layer in self.convolutions[index]:
feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features,
pool_residual=False):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
      as a box predictor layer, and the layer_depth is directly inferred from
      the feature map (instead of using the provided 'layer_depth' parameter).
      In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Convolution kernel size is set to 3 by default, and can be
      customized by 'conv_kernel_size' parameter (similarly, 'conv_kernel_size'
should be set to -1 if 'from_layer' is specified). The created convolution
operation will be a normal 2D convolution by default, and a depthwise
convolution followed by 1x1 convolution if 'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
pool_residual: Whether to add an average pooling layer followed by a
residual connection between subsequent feature maps when the channel
depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
a pooling and residual layer is added between the third and forth feature
map. This option is better used with Weight Shared Convolution Box
Predictor when all feature maps have the same channel depth to encourage
more consistent features across multi-scale feature maps.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if the number entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
pre_layer_depth = pre_layer.get_shape().as_list()[3]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth // 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth // 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
intermediate_layer = ops.fixed_padding(
intermediate_layer, conv_kernel_size)
if use_depthwise:
feature_map = slim.separable_conv2d(
intermediate_layer,
None, [conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
if pool_residual and pre_layer_depth == depth_fn(layer_depth):
if use_explicit_padding:
pre_layer = ops.fixed_padding(pre_layer, conv_kernel_size)
feature_map += slim.avg_pool2d(
pre_layer, [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=2,
scope=layer_name + '_pool')
else:
feature_map = slim.conv2d(
intermediate_layer,
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=stride,
scope=layer_name)
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
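# Illustrative sketch (not part of the original module): wiring an SSD-style
# layout into multi_resolution_feature_maps inside a TF1 graph. The endpoint
# names and tensor shapes are assumptions for this example; in practice they
# come from the chosen backbone's end_points dictionary.
def _example_multi_resolution_feature_maps():
  image_features = {
      'Mixed_5d': tf.placeholder(tf.float32, [2, 35, 35, 288]),
      'Mixed_6e': tf.placeholder(tf.float32, [2, 17, 17, 768]),
      'Mixed_7c': tf.placeholder(tf.float32, [2, 8, 8, 2048]),
  }
  feature_map_layout = {
      'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
      'layer_depth': [-1, -1, -1, 512, 256, 128],
  }
  # Returns an OrderedDict with six feature maps: the three backbone layers
  # plus three new stride-2 layers of depth 512, 256 and 128.
  return multi_resolution_feature_maps(
      feature_map_layout=feature_map_layout,
      depth_multiplier=1.0,
      min_depth=16,
      insert_1x1_conv=True,
      image_features=image_features)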
class KerasFpnTopDownFeatureMaps(tf.keras.Model):
"""Generates Keras based `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
"""
def __init__(self,
num_levels,
depth,
is_training,
conv_hyperparams,
freeze_batchnorm,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
use_native_resize_op=False,
scope=None,
name=None):
"""Constructor.
Args:
num_levels: the number of image features.
depth: depth of output feature maps.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op
for the upsampling process instead of reshape and broadcasting
implementation.
scope: A scope name to wrap this op under.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasFpnTopDownFeatureMaps, self).__init__(name=name)
self.scope = scope if scope else 'top_down'
self.top_layers = []
self.residual_blocks = []
self.top_down_blocks = []
self.reshape_blocks = []
self.conv_layers = []
padding = 'VALID' if use_explicit_padding else 'SAME'
stride = 1
kernel_size = 3
def clip_by_value(features):
return tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND)
# top layers
self.top_layers.append(tf.keras.layers.Conv2D(
depth, [1, 1], strides=stride, padding=padding,
name='projection_%d' % num_levels,
**conv_hyperparams.params(use_bias=True)))
if use_bounded_activations:
self.top_layers.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
for level in reversed(list(range(num_levels - 1))):
# to generate residual from image features
residual_net = []
# to preprocess top_down (the image feature map from last layer)
top_down_net = []
# to reshape top_down according to residual if necessary
reshaped_residual = []
# to apply convolution layers to feature map
conv_net = []
# residual block
residual_net.append(tf.keras.layers.Conv2D(
depth, [1, 1], padding=padding, strides=1,
name='projection_%d' % (level + 1),
**conv_hyperparams.params(use_bias=True)))
if use_bounded_activations:
residual_net.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
# top-down block
# TODO (b/128922690): clean-up of ops.nearest_neighbor_upsampling
if use_native_resize_op:
def resize_nearest_neighbor(image):
image_shape = shape_utils.combined_static_and_dynamic_shape(image)
return tf.image.resize_nearest_neighbor(
image, [image_shape[1] * 2, image_shape[2] * 2])
top_down_net.append(tf.keras.layers.Lambda(
resize_nearest_neighbor, name='nearest_neighbor_upsampling'))
else:
def nearest_neighbor_upsampling(image):
return ops.nearest_neighbor_upsampling(image, scale=2)
top_down_net.append(tf.keras.layers.Lambda(
nearest_neighbor_upsampling, name='nearest_neighbor_upsampling'))
# reshape block
if use_explicit_padding:
def reshape(inputs):
residual_shape = tf.shape(inputs[0])
return inputs[1][:, :residual_shape[1], :residual_shape[2], :]
reshaped_residual.append(
tf.keras.layers.Lambda(reshape, name='reshape'))
# down layers
if use_bounded_activations:
conv_net.append(tf.keras.layers.Lambda(
clip_by_value, name='clip_by_value'))
if use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
conv_net.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'smoothing_%d' % (level + 1)
conv_block = create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name,
conv_hyperparams, is_training, freeze_batchnorm, depth)
conv_net.extend(conv_block)
self.residual_blocks.append(residual_net)
self.top_down_blocks.append(top_down_net)
self.reshape_blocks.append(reshaped_residual)
self.conv_layers.append(conv_net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
        Spatial resolutions of successive tensors must reduce exactly by a
        factor of 2.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
output_feature_maps_list = []
output_feature_map_keys = []
with tf.name_scope(self.scope):
top_down = image_features[-1][1]
for layer in self.top_layers:
top_down = layer(top_down)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[-1][0])
num_levels = len(image_features)
for index, level in enumerate(reversed(list(range(num_levels - 1)))):
residual = image_features[level][1]
top_down = output_feature_maps_list[-1]
for layer in self.residual_blocks[index]:
residual = layer(residual)
for layer in self.top_down_blocks[index]:
top_down = layer(top_down)
for layer in self.reshape_blocks[index]:
top_down = layer([residual, top_down])
top_down += residual
for layer in self.conv_layers[index]:
top_down = layer(top_down)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
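# Illustrative sketch (not part of the original module): building the Keras
# FPN generator. The hyperparams proto text and the input names/shapes are
# assumptions for this example; real configs come from the pipeline proto.
def _example_keras_fpn_top_down_feature_maps():
  from google.protobuf import text_format
  from object_detection.builders import hyperparams_builder
  from object_detection.protos import hyperparams_pb2
  conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
  text_format.Merge("""
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
      activation: RELU_6
      batch_norm { scale: true }
  """, conv_hyperparams_proto)
  conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
      conv_hyperparams_proto)
  fpn_generator = KerasFpnTopDownFeatureMaps(
      num_levels=3,
      depth=256,
      is_training=False,
      conv_hyperparams=conv_hyperparams,
      freeze_batchnorm=True)
  image_features = [
      ('block2', tf.zeros([2, 32, 32, 256])),
      ('block3', tf.zeros([2, 16, 16, 512])),
      ('block4', tf.zeros([2, 8, 8, 1024])),
  ]
  # Returns an OrderedDict mapping 'top_down_block{2,3,4}' to 256-channel
  # feature maps of sizes 32x32, 16x16 and 8x8 respectively.
  return fpn_generator(image_features)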
def fpn_top_down_feature_maps(image_features,
depth,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
scope=None,
use_native_resize_op=False):
"""Generates `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
      Spatial resolutions of successive tensors must reduce exactly by a factor
of 2.
depth: depth of output feature maps.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
scope: A scope name to wrap this op under.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of reshape and broadcasting implementation.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
with tf.name_scope(scope, 'top_down'):
num_levels = len(image_features)
output_feature_maps_list = []
output_feature_map_keys = []
padding = 'VALID' if use_explicit_padding else 'SAME'
kernel_size = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
top_down = slim.conv2d(
image_features[-1][1],
depth, [1, 1], activation_fn=None, normalizer_fn=None,
scope='projection_%d' % num_levels)
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append(
'top_down_%s' % image_features[-1][0])
for level in reversed(list(range(num_levels - 1))):
if use_native_resize_op:
with tf.name_scope('nearest_neighbor_upsampling'):
top_down_shape = shape_utils.combined_static_and_dynamic_shape(
top_down)
top_down = tf.image.resize_nearest_neighbor(
top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
else:
top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
residual = slim.conv2d(
image_features[level][1], depth, [1, 1],
activation_fn=None, normalizer_fn=None,
scope='projection_%d' % (level + 1))
if use_bounded_activations:
residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_explicit_padding:
# slice top_down to the same shape as residual
residual_shape = tf.shape(residual)
top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
top_down += residual
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
pre_output = top_down
if use_explicit_padding:
pre_output = ops.fixed_padding(pre_output, kernel_size)
output_feature_maps_list.append(conv_op(
pre_output,
depth, [kernel_size, kernel_size],
scope='smoothing_%d' % (level + 1)))
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
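# Illustrative sketch (not part of the original module): the slim-based FPN
# variant, used inside a TF1 graph. The layer names and shapes below are
# assumptions for this example; successive maps must shrink by exactly a
# factor of 2.
def _example_fpn_top_down_feature_maps():
  image_features = [
      ('block2', tf.placeholder(tf.float32, [2, 64, 64, 256])),
      ('block3', tf.placeholder(tf.float32, [2, 32, 32, 512])),
      ('block4', tf.placeholder(tf.float32, [2, 16, 16, 1024])),
      ('block5', tf.placeholder(tf.float32, [2, 8, 8, 2048])),
  ]
  # Returns an OrderedDict of four 256-channel maps keyed 'top_down_block*'.
  return fpn_top_down_feature_maps(image_features, depth=256)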
def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
image_features, replace_pool_with_conv=False):
"""Generates pooling pyramid feature maps.
  The pooling pyramid feature maps are motivated by
  multi_resolution_feature_maps. The main differences are that it is simpler
  and reduces the number of free parameters.
  More specifically:
  - Instead of using convolutions to shrink the feature map, it uses max
    pooling, which removes the convolution parameters entirely.
  - By pooling features from a larger map down to a single cell, it generates
    features in the same feature space.
  - Instead of independently making box predictions from individual maps, it
    shares the same classifier across different feature maps, thereby reducing
    the "mis-calibration" across different scales.
  See go/ppn-detection for more details.
Args:
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
image_features: A dictionary of handles to activation tensors from the
feature extractor.
replace_pool_with_conv: Whether or not to replace pooling operations with
convolutions in the PPN. Default is False.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: image_features does not contain exactly one entry
"""
if len(image_features) != 1:
raise ValueError('image_features should be a dictionary of length 1.')
image_features = image_features[list(image_features.keys())[0]]
feature_map_keys = []
feature_maps = []
feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
if base_feature_map_depth > 0:
image_features = slim.conv2d(
image_features,
base_feature_map_depth,
[1, 1], # kernel size
padding='SAME', stride=1, scope=feature_map_key)
    # Add a 1x1 max-pooling node (a no-op) immediately after the conv2d for
    # TPU v1 compatibility. Without this dummy op, the TPU runtime compiler
    # would fuse the convolution with the max-pooling below into a single
    # cycle, making it impossible to retrieve the conv2d feature.
image_features = slim.max_pool2d(
image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(image_features)
feature_map = image_features
if replace_pool_with_conv:
with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
base_feature_map_depth)
feature_map = slim.conv2d(
feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
else:
with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'MaxPool2d_%d_2x2' % i
feature_map = slim.max_pool2d(
feature_map, [2, 2], padding='SAME', scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
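# Illustrative sketch (not part of the original module): a PPN head pooled
# from a single 19x19 backbone feature map. The shape and dictionary key are
# assumptions for this example; the dictionary must contain exactly one entry.
def _example_pooling_pyramid_feature_maps():
  image_features = {
      'Conv2d_11_pointwise': tf.placeholder(tf.float32, [2, 19, 19, 512]),
  }
  # Returns six maps of sizes 19, 10, 5, 3, 2 and 1; all keep 512 channels
  # because base_feature_map_depth=0 skips the initial 1x1 convolution.
  return pooling_pyramid_feature_maps(
      base_feature_map_depth=0, num_layers=6, image_features=image_features)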
| 35,694 | 42.162031 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet50V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet50v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet50V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_50'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet101V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet101v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet101V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_101'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet152V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet152v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (
ssd_resnet_v1_fpn_feature_extractor.SSDResnet152V1FpnFeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _resnet_scope_name(self):
return 'resnet_v1_152'
if __name__ == '__main__':
tf.test.main()
| 3,411 | 38.674419 | 80 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_fpn_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet V2[1] + FPN[2] feature extractor for CenterNet[3] meta architecture.
[1]: https://arxiv.org/abs/1801.04381
[2]: https://arxiv.org/abs/1612.03144.
[3]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2
_MOBILENET_V2_FPN_SKIP_LAYERS = [
'block_2_add', 'block_5_add', 'block_9_add', 'out_relu'
]
class CenterNetMobileNetV2FPNFeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The MobileNet V2 with FPN skip layers feature extractor for CenterNet."""
def __init__(self,
mobilenet_v2_net,
channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.),
bgr_ordering=False,
use_separable_conv=False,
upsampling_interpolation='nearest'):
"""Intializes the feature extractor.
Args:
mobilenet_v2_net: The underlying mobilenet_v2 network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
      bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
use_separable_conv: If set to True, all convolutional layers in the FPN
network will be replaced by separable convolutions.
upsampling_interpolation: A string (one of 'nearest' or 'bilinear')
indicating which interpolation method to use for the upsampling ops in
the FPN.
"""
super(CenterNetMobileNetV2FPNFeatureExtractor, self).__init__(
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._base_model = mobilenet_v2_net
output = self._base_model(self._base_model.input)
# Add pyramid feature network on every layer that has stride 2.
skip_outputs = [
self._base_model.get_layer(skip_layer_name).output
for skip_layer_name in _MOBILENET_V2_FPN_SKIP_LAYERS
]
self._fpn_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=skip_outputs)
fpn_outputs = self._fpn_model(self._base_model.input)
    # Construct the top-down feature maps -- starting from the 7x7x1280 output,
    # we repeatedly upsample, add a projected residual (skip) connection from
    # the bottom-up pathway, and apply a 3x3 convolution. This results in a
    # 56x56x24 output volume.
top_layer = fpn_outputs[-1]
# Use normal convolutional layer since the kernel_size is 1.
residual_op = tf.keras.layers.Conv2D(
filters=64, kernel_size=1, strides=1, padding='same')
top_down = residual_op(top_layer)
num_filters_list = [64, 32, 24]
for i, num_filters in enumerate(num_filters_list):
level_ind = len(num_filters_list) - 1 - i
# Upsample.
upsample_op = tf.keras.layers.UpSampling2D(
2, interpolation=upsampling_interpolation)
top_down = upsample_op(top_down)
# Residual (skip-connection) from bottom-up pathway.
# Use normal convolutional layer since the kernel_size is 1.
residual_op = tf.keras.layers.Conv2D(
filters=num_filters, kernel_size=1, strides=1, padding='same')
residual = residual_op(fpn_outputs[level_ind])
# Merge.
top_down = top_down + residual
next_num_filters = num_filters_list[i + 1] if i + 1 <= 2 else 24
if use_separable_conv:
conv = tf.keras.layers.SeparableConv2D(
filters=next_num_filters, kernel_size=3, strides=1, padding='same')
else:
conv = tf.keras.layers.Conv2D(
filters=next_num_filters, kernel_size=3, strides=1, padding='same')
top_down = conv(top_down)
top_down = tf.keras.layers.BatchNormalization()(top_down)
top_down = tf.keras.layers.ReLU()(top_down)
output = top_down
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=output)
def preprocess(self, resized_inputs):
resized_inputs = super(CenterNetMobileNetV2FPNFeatureExtractor,
self).preprocess(resized_inputs)
return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
@property
def classification_backbone(self):
return self._base_model
def call(self, inputs):
return [self._feature_extractor_model(inputs)]
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""The number of feature outputs returned by the feature extractor."""
return 1
def mobilenet_v2_fpn(channel_means, channel_stds, bgr_ordering,
use_separable_conv=False, depth_multiplier=1.0,
upsampling_interpolation='nearest', **kwargs):
"""The MobileNetV2+FPN backbone for CenterNet."""
del kwargs
# Set to batchnorm_training to True for now.
network = mobilenetv2.mobilenet_v2(
batchnorm_training=True,
alpha=depth_multiplier,
include_top=False,
weights='imagenet' if depth_multiplier == 1.0 else None)
return CenterNetMobileNetV2FPNFeatureExtractor(
network,
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering,
use_separable_conv=use_separable_conv,
upsampling_interpolation=upsampling_interpolation)
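# Illustrative sketch (not part of the original file): constructing the
# extractor and running it on a dummy batch. The 512x512 input size is an
# assumption made for this example; with depth_multiplier=1.0 the backbone
# loads ImageNet weights, so network access is assumed as well.
def _example_mobilenet_v2_fpn():
  extractor = mobilenet_v2_fpn(
      channel_means=(0., 0., 0.),
      channel_stds=(1., 1., 1.),
      bgr_ordering=False)
  images = tf.zeros([1, 512, 512, 3], dtype=tf.float32)
  features = extractor(extractor.preprocess(images))
  # out_stride is 4, so the single output map has shape [1, 128, 128, 24].
  return features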
| 6,331 | 37.609756 | 81 | py |
models | models-master/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedded-friendly SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from nets import mobilenet_v1
class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Embedded-friendly SSD Feature Extractor using MobilenetV1 features.
  This feature extractor is similar to the SSD MobileNetV1 feature extractor.
  It fixes the input resolution to 256x256, reduces the number of feature maps
  used for box prediction, and ensures that no convolution kernel is larger
  than the input tensor in its spatial dimensions.
This feature extractor requires support of the following ops if used in
embedded devices:
- Conv
- DepthwiseConv
- Relu6
All conv/depthwiseconv use SAME padding, and no additional spatial padding is
needed.
"""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""MobileNetV1 Feature Extractor for Embedded-friendly SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to. For EmbeddedSSD it must be set to 1.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: upon invalid `pad_to_multiple` values.
"""
if pad_to_multiple != 1:
raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
'of 1.')
super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: if image height or width are not 256 pixels.
"""
image_shape = preprocessed_inputs.get_shape()
image_shape.assert_has_rank(4)
image_height = image_shape[1].value
image_width = image_shape[2].value
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256),
tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
['image size must be 256 in both height and width.'])
with tf.control_dependencies([shape_assert]):
preprocessed_inputs = tf.identity(preprocessed_inputs)
elif image_height != 256 or image_width != 256:
raise ValueError('image size must be = 256 in both height and width;'
' image dim = %d,%d' % (image_height, image_width))
feature_map_layout = {
'from_layer': [
'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''
],
'layer_depth': [-1, -1, 512, 256, 256],
'conv_kernel_size': [-1, -1, 3, 3, 2],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
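# Illustrative sketch (not part of the original file): exercising the
# extractor in a TF1 graph. The arg_scope stub stands in for a
# conv_hyperparams_fn normally produced by hyperparams_builder from the
# pipeline config; it is an assumption made only for this example.
def _example_embedded_ssd_mobilenet_v1():
  def conv_hyperparams_fn():
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm) as sc:
      return sc
  feature_extractor = EmbeddedSSDMobileNetV1FeatureExtractor(
      is_training=False,
      depth_multiplier=1.0,
      min_depth=16,
      pad_to_multiple=1,
      conv_hyperparams_fn=conv_hyperparams_fn)
  images = tf.placeholder(tf.float32, [1, 256, 256, 3])  # Must be 256x256.
  return feature_extractor.extract_features(
      feature_extractor.preprocess(images))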
| 6,721 | 39.739394 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_testbase.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 feature extractors."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
class SSDResnetPpnFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Helper test class for SSD Resnet PPN feature extractors."""
@abc.abstractmethod
def _scope_name(self):
pass
def test_extract_features_returns_correct_shapes_289(self):
image_height = 289
image_width = 289
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024),
(2, 5, 5, 1024), (2, 3, 3, 1024),
(2, 2, 2, 1024), (2, 1, 1, 1024)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 289
image_width = 289
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 1024), (2, 10, 10, 1024),
(2, 5, 5, 1024), (2, 3, 3, 1024),
(2, 2, 2, 1024), (2, 1, 1, 1024)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = tf.constant(np.random.rand(4, image_height, image_width, 3))
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
with self.test_session() as sess:
test_image_out, preprocessed_image_out = sess.run(
[test_image, preprocessed_image])
self.assertAllClose(preprocessed_image_out,
test_image_out - [[123.68, 116.779, 103.939]])
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, self._scope_name())
| 3,424 | 40.26506 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 PPN features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 PPN features."""
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=0,
num_layers=6,
image_features={
'image_features': image_features['Conv2d_11_pointwise']
})
return list(feature_maps.values())
| 3,265 | 37.880952 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN Keras-based Resnet V1 FPN Feature Extractor."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import ops
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out',
'conv4_block36_out', 'conv5_block3_out'],
}
class _ResnetFPN(tf.keras.layers.Layer):
"""Construct Resnet FPN layer."""
def __init__(self,
backbone_classifier,
fpn_features_generator,
coarse_feature_layers,
pad_to_multiple,
fpn_min_level,
resnet_block_names,
base_fpn_max_level):
"""Constructor.
Args:
backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50',
'resnet_v1_101', 'resnet_v1_152'.
      fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a list
        of (name, feature map) tuples and returns an ordered dictionary of
        fpn features.
coarse_feature_layers: Coarse feature layers for fpn.
pad_to_multiple: An integer multiple to pad input image.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet v1 layers.
resnet_block_names: a list of block names of resnet.
base_fpn_max_level: maximum level of fpn without coarse feature layers.
"""
super(_ResnetFPN, self).__init__()
self.classification_backbone = backbone_classifier
self.fpn_features_generator = fpn_features_generator
self.coarse_feature_layers = coarse_feature_layers
self.pad_to_multiple = pad_to_multiple
self._fpn_min_level = fpn_min_level
self._resnet_block_names = resnet_block_names
self._base_fpn_max_level = base_fpn_max_level
def call(self, inputs):
"""Create internal Resnet FPN layer.
Args:
inputs: A [batch, height_out, width_out, channels] float32 tensor
representing a batch of images.
Returns:
feature_maps: A list of tensors with shape [batch, height, width, depth]
        representing extracted features.
"""
inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple)
backbone_outputs = self.classification_backbone(inputs)
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, backbone_outputs)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self.fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_block{}'.format(level-1)])
last_feature_map = fpn_features['top_down_block{}'.format(
self._base_fpn_max_level - 1)]
for coarse_feature_layers in self.coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
class FasterRCNNResnetV1FpnKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride,
conv_hyperparams,
batch_norm_trainable=True,
pad_to_multiple=32,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
batch_norm_trainable: See base class.
pad_to_multiple: An integer multiple to pad input image.
weight_decay: See base class.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet v1 layers.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
self._conv_hyperparams = conv_hyperparams
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._freeze_batchnorm = (not batch_norm_trainable)
self._pad_to_multiple = pad_to_multiple
self._override_base_feature_extractor_hyperparams = \
override_base_feature_extractor_hyperparams
self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the Resnet v1 FPN network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A list of tensors with shape [batch, height, width, depth]
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
full_resnet_v1_model = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=(self._conv_hyperparams if
self._override_base_feature_extractor_hyperparams
else None),
classes=None,
weights=None,
include_top=False)
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[
self._resnet_v1_base_model_name]
outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self.classification_backbone = tf.keras.Model(
inputs=full_resnet_v1_model.inputs,
outputs=outputs)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._additional_layer_depth,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
self._additional_layer_depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
feature_extractor_model = _ResnetFPN(self.classification_backbone,
self._fpn_features_generator,
self._coarse_feature_layers,
self._pad_to_multiple,
self._fpn_min_level,
self._resnet_block_names,
self._base_fpn_max_level)
return feature_extractor_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
    Constructs two fully connected layers to extract the box classifier features.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, 1, 1, 1024]
representing box classifier features for each proposal.
"""
with tf.name_scope(name):
with tf.name_scope('ResnetV1FPN'):
feature_extractor_model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1024, activation='relu'),
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm)),
tf.keras.layers.Dense(units=1024, activation='relu'),
tf.keras.layers.Reshape((1, 1, 1024))
])
return feature_extractor_model
class FasterRCNNResnet50FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet50 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams
)
class FasterRCNNResnet101FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet101 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
class FasterRCNNResnet152FpnKerasFeatureExtractor(
FasterRCNNResnetV1FpnKerasFeatureExtractor):
"""Faster RCNN with Resnet152 FPN feature extractor."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=True,
conv_hyperparams=None,
weight_decay=0.0,
fpn_min_level=2,
fpn_max_level=6,
additional_layer_depth=256,
override_base_feature_extractor_hyperparams=False):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
conv_hyperparams: See base class.
weight_decay: See base class.
fpn_min_level: See base class.
fpn_max_level: See base class.
additional_layer_depth: See base class.
override_base_feature_extractor_hyperparams: See base class.
"""
super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
first_stage_features_stride=first_stage_features_stride,
conv_hyperparams=conv_hyperparams,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay,
fpn_min_level=fpn_min_level,
fpn_max_level=fpn_max_level,
additional_layer_depth=additional_layer_depth,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
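# --- Editor's usage sketch (not part of the original file). It mirrors the
# FPN level bookkeeping performed in get_proposal_feature_extractor_model
# above: with the default fpn_min_level=2 and fpn_max_level=6, the top-down
# FPN covers levels 2-5 and a single coarse `bottom_up_block` layer supplies
# level 6, so the RPN receives five feature maps in total.
def _example_num_rpn_feature_maps(fpn_min_level=2, fpn_max_level=6):
  base_fpn_max_level = min(fpn_max_level, 5)
  num_fpn_levels = base_fpn_max_level + 1 - fpn_min_level
  num_coarse_layers = fpn_max_level - base_fpn_max_level
  return num_fpn_levels + num_coarse_layers  # 5 for the defaults.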
| 17,933 | 40.227586 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based MobilenetV2 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
from object_detection.utils import shape_utils
# Total number of blocks in Mobilenet_V2 base network.
NUM_LAYERS = 19
# A modified config of mobilenet v2 that makes it more detection friendly.
def _create_modified_mobilenet_config():
last_conv = model_utils.ConvDefs(conv_name='Conv_1', filters=256)
return [last_conv]
class SSDMobileNetV2FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based MobilenetV2 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Mobilenet v2 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v2 layers
{layer_4, layer_7, layer_14, layer_19}, respectively.
      fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to fpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of FPN
        levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
      use_native_resize_op: Whether to use tf.compat.v1.image.resize_nearest_neighbor
        to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV2FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
self._feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False,
input_shape=(None, None, input_shape[-1]))
layer_names = [layer.name for layer in full_mobilenet_v2.layers]
outputs = []
for layer_idx in [4, 7, 14]:
add_name = 'block_{}_add'.format(layer_idx - 2)
project_name = 'block_{}_project_BN'.format(layer_idx - 2)
output_layer_name = add_name if add_name in layer_names else project_name
outputs.append(full_mobilenet_v2.get_layer(output_layer_name).output)
layer_19 = full_mobilenet_v2.get_layer(name='out_relu').output
outputs.append(layer_19)
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v2.inputs,
outputs=outputs)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
stride = 2
for i in range(self._base_fpn_max_level + 1, self._fpn_max_level + 1):
coarse_feature_layers = []
if self._use_explicit_padding:
def fixed_padding(features, kernel_size=kernel_size):
return ops.fixed_padding(features, kernel_size)
coarse_feature_layers.append(tf.keras.layers.Lambda(
fixed_padding, name='fixed_padding'))
layer_name = 'bottom_up_Conv2d_{}'.format(
i - self._base_fpn_max_level + NUM_LAYERS)
conv_block = feature_map_generators.create_conv_block(
self._use_depthwise, kernel_size, padding, stride, layer_name,
self._conv_hyperparams, self._is_training, self._freeze_batchnorm,
self._depth_fn(self._additional_layer_depth))
coarse_feature_layers.extend(conv_block)
self._coarse_feature_layers.append(coarse_feature_layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append(self._feature_blocks[level - 2])
feature_start_index = len(self._feature_blocks) - self._num_levels
fpn_input_image_features = [
(key, image_features[feature_start_index + index])
for index, key in enumerate(feature_block_list)]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
self._feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
self._feature_blocks[self._base_fpn_max_level - 2])]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
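# --- Editor's usage sketch (not part of the original file). It mirrors the
# `_depth_fn` clamp defined in build() above: the FPN layer depth is scaled by
# depth_multiplier but never drops below min_depth, which is why a tiny
# multiplier such as 0.5**12 still yields 32-channel feature maps in the
# min-depth tests elsewhere in this repository.
def _example_fpn_layer_depth(depth_multiplier=1.0, min_depth=32,
                             additional_layer_depth=256):
  return max(int(additional_layer_depth * depth_multiplier), min_depth)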
| 10,807 | 43.295082 | 80 | py |
models | models-master/research/object_detection/models/center_net_hourglass_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hourglass[1] feature extractor for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1603.06937
[2]: https://arxiv.org/abs/1904.07850
"""
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import hourglass_network
class CenterNetHourglassFeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The hourglass feature extractor for CenterNet.
This class is a thin wrapper around the HourglassFeatureExtractor class
along with some preprocessing methods inherited from the base class.
"""
def __init__(self, hourglass_net, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Intializes the feature extractor.
Args:
hourglass_net: The underlying hourglass network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetHourglassFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._network = hourglass_net
def call(self, inputs):
return self._network(inputs)
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
return self._network.num_hourglasses
def hourglass_10(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-10 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_10(num_channels=32)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_20(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-20 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_20(num_channels=48)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_32(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-32 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_32(num_channels=48)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_52(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-52 backbone for CenterNet."""
del kwargs
network = hourglass_network.hourglass_52(num_channels=64)
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def hourglass_104(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The Hourglass-104 backbone for CenterNet."""
del kwargs
# TODO(vighneshb): update hourglass_104 signature to match with other
# hourglass networks.
network = hourglass_network.hourglass_104()
return CenterNetHourglassFeatureExtractor(
network, channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
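# --- Editor's usage sketch (not part of the original file). It builds the
# smallest hourglass wrapper defined above and reads its static properties:
# features are produced at 1/4 of the input resolution, with one output per
# hourglass stack in the backbone.
def _example_hourglass_extractor_properties():
  extractor = hourglass_10(channel_means=(0., 0., 0.),
                           channel_stds=(1., 1., 1.),
                           bgr_ordering=False)
  return extractor.out_stride, extractor.num_feature_outputs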
| 4,250 | 35.646552 | 80 | py |
models | models-master/research/object_detection/models/center_net_resnet_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnetv2 based feature extractors for CenterNet[1] meta architecture.
[1]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor
class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor):
"""Resnet v2 base feature extractor for the CenterNet model."""
def __init__(self, resnet_type, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes the feature extractor with a specific ResNet architecture.
Args:
resnet_type: A string specifying which kind of ResNet to use. Currently
only `resnet_v2_50` and `resnet_v2_101` are supported.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetResnetFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
if resnet_type == 'resnet_v2_101':
self._base_model = tf.keras.applications.ResNet101V2(weights=None,
include_top=False)
output_layer = 'conv5_block3_out'
elif resnet_type == 'resnet_v2_50':
self._base_model = tf.keras.applications.ResNet50V2(weights=None,
include_top=False)
output_layer = 'conv5_block3_out'
else:
raise ValueError('Unknown Resnet Model {}'.format(resnet_type))
output_layer = self._base_model.get_layer(output_layer)
self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input,
outputs=output_layer.output)
resnet_output = self._resnet_model(self._base_model.input)
for num_filters in [256, 128, 64]:
# TODO(vighneshb) This section has a few differences from the paper
# Figure out how much of a performance impact they have.
# 1. We use a simple convolution instead of a deformable convolution
conv = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=3,
strides=1, padding='same')
resnet_output = conv(resnet_output)
resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)
resnet_output = tf.keras.layers.ReLU()(resnet_output)
# 2. We use the default initialization for the convolution layers
# instead of initializing it to do bilinear upsampling.
conv_transpose = tf.keras.layers.Conv2DTranspose(filters=num_filters,
kernel_size=3, strides=2,
padding='same')
resnet_output = conv_transpose(resnet_output)
resnet_output = tf.keras.layers.BatchNormalization()(resnet_output)
resnet_output = tf.keras.layers.ReLU()(resnet_output)
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=resnet_output)
def preprocess(self, resized_inputs):
"""Preprocess input images for the ResNet model.
This scales images in the range [0, 255] to the range [-1, 1]
Args:
resized_inputs: a [batch, height, width, channels] float32 tensor.
Returns:
outputs: a [batch, height, width, channels] float32 tensor.
"""
resized_inputs = super(CenterNetResnetFeatureExtractor, self).preprocess(
resized_inputs)
return tf.keras.applications.resnet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
def call(self, inputs):
"""Returns image features extracted by the backbone.
Args:
inputs: An image tensor of shape [batch_size, input_height,
input_width, 3]
Returns:
features_list: A list of length 1 containing a tensor of shape
[batch_size, input_height // 4, input_width // 4, 64] containing
the features extracted by the ResNet.
"""
return [self._feature_extractor_model(inputs)]
@property
def num_feature_outputs(self):
return 1
@property
def out_stride(self):
return 4
@property
def classification_backbone(self):
return self._base_model
def resnet_v2_101(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v2 101 feature extractor."""
del kwargs
return CenterNetResnetFeatureExtractor(
resnet_type='resnet_v2_101',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v2_50(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v2 50 feature extractor."""
del kwargs
return CenterNetResnetFeatureExtractor(
resnet_type='resnet_v2_50',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
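# --- Editor's usage sketch (not part of the original file). It runs the
# ResNet-50 v2 extractor defined above on a dummy batch: the output stride is
# 4, so a 512x512 input yields a single [1, 128, 128, 64] feature map. The
# batch size and input resolution are arbitrary illustrative choices.
def _example_resnet_v2_50_feature_shape():
  extractor = resnet_v2_50(channel_means=(0., 0., 0.),
                           channel_stds=(1., 1., 1.),
                           bgr_ordering=False)
  images = tf.zeros([1, 512, 512, 3], dtype=tf.float32)
  features = extractor(extractor.preprocess(images))
  return features[0].shape  # [1, 128, 128, 64]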
| 5,892 | 37.266234 | 94 | py |
models | models-master/research/object_detection/models/ssd_pnasnet_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_pnas_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_pnasnet_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdPnasNetFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=True):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return ssd_pnasnet_feature_extractor.SSDPNASNetFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 2160), (2, 10, 10, 4320),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 2160), (2, 4, 4, 4320),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, num_layers=4)
if __name__ == '__main__':
tf.test.main()
| 4,375 | 39.146789 | 80 | py |
models | models-master/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing ResNet v1 FPN models for the CenterNet meta architecture."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase,
parameterized.TestCase):
@parameterized.parameters(
{'resnet_type': 'resnet_v1_50'},
{'resnet_type': 'resnet_v1_101'},
{'resnet_type': 'resnet_v1_18'},
{'resnet_type': 'resnet_v1_34'},
)
def test_correct_output_size(self, resnet_type):
"""Verify that shape of features returned by the backbone is correct."""
    model = (center_net_resnet_v1_fpn_feature_extractor.
             CenterNetResnetV1FpnFeatureExtractor(resnet_type))
def graph_fn():
img = np.zeros((8, 512, 512, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
self.assertEqual(self.execute(graph_fn, []).shape, (8, 128, 128, 64))
if __name__ == '__main__':
tf.test.main()
| 1,968 | 36.865385 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor.
By using the parameterized test decorator, this test serves both Slim-based and
Keras-based Mobilenet V2 FPN feature extractors in SSD.
"""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
@parameterized.parameters(
{
'use_depthwise': False,
},
{
'use_depthwise': True,
},
)
class SsdMobilenetV2FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
is_training=True,
use_explicit_padding=False,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_fpn_keras_feature_extractor
.SSDMobileNetV2FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
name='MobilenetV2_FPN'))
def test_extract_features_returns_correct_shapes_256(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_384(self,
use_depthwise):
use_keras = True
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_4_channels(self,
use_depthwise):
use_keras = True
image_height = 320
image_width = 320
num_channels = 4
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
def test_extract_features_with_dynamic_image_shape(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_depthwise):
use_keras = True
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_depthwise=False):
use_keras = True
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_preprocess_returns_correct_value_range(self,
use_depthwise):
use_keras = True
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
| 10,575 | 33.789474 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
By using the parameterized test decorator, this test serves both Slim-based and
Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_fpn_feature_extractor.
SSDMobileNetV1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=True,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_384(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 153)
def test_fused_batchnorm(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
if __name__ == '__main__':
tf.test.main()
| 8,728 | 41.169082 | 80 | py |
models | models-master/research/object_detection/models/ssd_efficientnet_bifpn_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based EfficientNet + BiFPN (EfficientDet) Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from keras import backend as keras_backend
from six.moves import range
from six.moves import zip
import tensorflow.compat.v2 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import bidirectional_feature_pyramid_generators as bifpn_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
try:
from official.legacy.image_classification.efficientnet import efficientnet_model
except ModuleNotFoundError:
from official.vision.image_classification.efficientnet import efficientnet_model
_EFFICIENTNET_LEVEL_ENDPOINTS = {
1: 'stack_0/block_0/project_bn',
2: 'stack_1/block_1/add',
3: 'stack_2/block_1/add',
4: 'stack_4/block_2/add',
5: 'stack_6/block_0/project_bn',
}
class SSDEfficientNetBiFPNKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Keras-based EfficientNetBiFPN (EfficientDet) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level,
bifpn_max_level,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method,
efficientnet_version,
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name=None):
"""SSD Keras-based EfficientNetBiFPN (EfficientDet) feature extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5} which map to the EfficientNet backbone
        endpoints listed in _EFFICIENTNET_LEVEL_ENDPOINTS.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level
        up to bifpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of BiFPN
        levels.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
efficientnet_version: the EfficientNet version to use for this feature
extractor's backbone.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: Whether to override the
efficientnet backbone's default weight decay with the weight decay
defined by `conv_hyperparams`. Note, only overriding of weight decay is
currently supported.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetBiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
if depth_multiplier != 1.0:
raise ValueError('EfficientNetBiFPN does not support a non-default '
'depth_multiplier.')
if use_explicit_padding:
raise ValueError('EfficientNetBiFPN does not support explicit padding.')
if use_depthwise:
raise ValueError('EfficientNetBiFPN does not support use_depthwise.')
self._bifpn_min_level = bifpn_min_level
self._bifpn_max_level = bifpn_max_level
self._bifpn_num_iterations = bifpn_num_iterations
self._bifpn_num_filters = max(bifpn_num_filters, min_depth)
self._bifpn_node_params = {'combine_method': bifpn_combine_method}
self._efficientnet_version = efficientnet_version
self._use_native_resize_op = use_native_resize_op
logging.info('EfficientDet EfficientNet backbone version: %s',
self._efficientnet_version)
logging.info('EfficientDet BiFPN num filters: %d', self._bifpn_num_filters)
logging.info('EfficientDet BiFPN num iterations: %d',
self._bifpn_num_iterations)
self._backbone_max_level = min(
max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()), bifpn_max_level)
self._output_layer_names = [
_EFFICIENTNET_LEVEL_ENDPOINTS[i]
for i in range(bifpn_min_level, self._backbone_max_level + 1)]
self._output_layer_alias = [
'level_{}'.format(i)
for i in range(bifpn_min_level, self._backbone_max_level + 1)]
# Initialize the EfficientNet backbone.
# Note, this is currently done in the init method rather than in the build
# method, since doing so introduces an error which is not well understood.
efficientnet_overrides = {'rescale_input': False}
if override_base_feature_extractor_hyperparams:
efficientnet_overrides[
'weight_decay'] = conv_hyperparams.get_regularizer_weight()
if (conv_hyperparams.use_sync_batch_norm() and
keras_backend.is_tpu_strategy(tf.distribute.get_strategy())):
efficientnet_overrides['batch_norm'] = 'tpu'
efficientnet_base = efficientnet_model.EfficientNet.from_name(
model_name=self._efficientnet_version, overrides=efficientnet_overrides)
outputs = [efficientnet_base.get_layer(output_layer_name).output
for output_layer_name in self._output_layer_names]
self._efficientnet = tf.keras.Model(
inputs=efficientnet_base.inputs, outputs=outputs)
self.classification_backbone = efficientnet_base
self._bifpn_stage = None
def build(self, input_shape):
self._bifpn_stage = bifpn_generators.KerasBiFpnFeatureMaps(
bifpn_num_iterations=self._bifpn_num_iterations,
bifpn_num_filters=self._bifpn_num_filters,
fpn_min_level=self._bifpn_min_level,
fpn_max_level=self._bifpn_max_level,
input_max_level=self._backbone_max_level,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
bifpn_node_params=self._bifpn_node_params,
use_native_resize_op=self._use_native_resize_op,
name='bifpn')
self.built = True
def preprocess(self, inputs):
"""SSD preprocessing.
Channel-wise mean subtraction and scaling.
Args:
inputs: a [batch, height, width, channels] float tensor representing a
batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if inputs.shape.as_list()[3] == 3:
# Input images are expected to be in the range [0, 255].
channel_offset = [0.485, 0.456, 0.406]
channel_scale = [0.229, 0.224, 0.225]
return ((inputs / 255.0) - [[channel_offset]]) / [[channel_scale]]
else:
return inputs
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
base_feature_maps = self._efficientnet(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
output_feature_map_dict = self._bifpn_stage(
list(zip(self._output_layer_alias, base_feature_maps)))
return list(output_feature_map_dict.values())
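# --- Editor's usage sketch (not part of the original file). It mirrors the
# backbone endpoint selection in the constructor above: for the
# EfficientDet-D0 defaults (bifpn_min_level=3, bifpn_max_level=7), the
# EfficientNet endpoints for levels 3-5 feed the BiFPN and levels 6-7 are
# synthesized inside the BiFPN itself.
def _example_efficientnet_endpoint_selection(bifpn_min_level=3,
                                             bifpn_max_level=7):
  backbone_max_level = min(max(_EFFICIENTNET_LEVEL_ENDPOINTS.keys()),
                           bifpn_max_level)
  return [_EFFICIENTNET_LEVEL_ENDPOINTS[i]
          for i in range(bifpn_min_level, backbone_max_level + 1)]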
class SSDEfficientNetB0BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=3,
bifpn_num_filters=64,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D0'):
"""SSD Keras EfficientNet-b0 BiFPN (EfficientDet-d0) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB0BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b0',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB1BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=4,
bifpn_num_filters=88,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D1'):
"""SSD Keras EfficientNet-b1 BiFPN (EfficientDet-d1) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB1BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b1',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB2BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=5,
bifpn_num_filters=112,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D2'):
"""SSD Keras EfficientNet-b2 BiFPN (EfficientDet-d2) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB2BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b2',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB3BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=6,
bifpn_num_filters=160,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D3'):
"""SSD Keras EfficientNet-b3 BiFPN (EfficientDet-d3) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB3BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b3',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB4BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=7,
bifpn_num_filters=224,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D4'):
"""SSD Keras EfficientNet-b4 BiFPN (EfficientDet-d4) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB4BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b4',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB5BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=7,
bifpn_num_filters=288,
bifpn_combine_method='fast_attention',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D5'):
"""SSD Keras EfficientNet-b5 BiFPN (EfficientDet-d5) Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB5BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b5',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB6BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=8,
bifpn_num_filters=384,
bifpn_combine_method='sum',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientDet-D6-D7'):
"""SSD Keras EfficientNet-b6 BiFPN (EfficientDet-d[6,7]) Feature Extractor.
SSD Keras EfficientNet-b6 BiFPN Feature Extractor, a.k.a. EfficientDet-d6
and EfficientDet-d7. The EfficientDet-d[6,7] models use the same backbone
EfficientNet-b6 and the same BiFPN architecture, and therefore have the same
number of parameters. They only differ in their input resolutions.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB6BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b6',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDEfficientNetB7BiFPNKerasFeatureExtractor(
SSDEfficientNetBiFPNKerasFeatureExtractor):
"""SSD Keras EfficientNet-b7 BiFPN Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=8,
bifpn_num_filters=384,
bifpn_combine_method='sum',
use_explicit_padding=None,
use_depthwise=None,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=None,
name='EfficientNet-B7_BiFPN'):
"""SSD Keras EfficientNet-b7 BiFPN Feature Extractor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: unsupported by EfficientNetBiFPN. float, depth
multiplier for the feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during training
or not. When training with a small batch size (e.g. 1), it is desirable
to freeze batch norm update and use pretrained batch norm params.
      inplace_batchnorm_update: whether to update batch norm moving average
        values inplace. When this is false, the train op must add a control
        dependency on the tf.GraphKeys.UPDATE_OPS collection in order to
        update batch norm statistics.
      bifpn_min_level: the highest resolution feature map to use in BiFPN. The
        valid values are {2, 3, 4, 5}, which correspond to feature map levels
        of the EfficientNet backbone.
      bifpn_max_level: the smallest resolution feature map to use in the BiFPN.
        BiFPN construction uses feature maps starting from bifpn_min_level up
        to bifpn_max_level. If the backbone network does not provide enough
        feature maps, additional feature maps are created by applying stride 2
        convolutions until the desired number of BiFPN levels is reached.
      bifpn_num_iterations: number of BiFPN iterations. Overridden if
        efficientdet_version is provided.
      bifpn_num_filters: number of filters (channels) in all BiFPN layers.
        Overridden if efficientdet_version is provided.
bifpn_combine_method: the method used to combine BiFPN nodes.
use_explicit_padding: unsupported by EfficientNetBiFPN. Whether to use
explicit padding when extracting features.
use_depthwise: unsupported by EfficientNetBiFPN, since BiFPN uses regular
convolutions when inputs to a node have a differing number of channels,
and use separable convolutions after combine operations.
      use_native_resize_op: If True, will use
        tf.compat.v1.image.resize_nearest_neighbor for BiFPN upsampling.
override_base_feature_extractor_hyperparams: unsupported. Whether to
override hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras will
auto-generate one from the class name.
"""
super(SSDEfficientNetB7BiFPNKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
bifpn_min_level=bifpn_min_level,
bifpn_max_level=bifpn_max_level,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version='efficientnet-b7',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
use_native_resize_op=use_native_resize_op,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
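# Illustrative sketch (not used elsewhere in the library): constructing one of
# the extractors defined above. The `conv_hyperparams` argument is assumed to
# be a hyperparams_builder.KerasLayerHyperparams instance created elsewhere;
# the numeric values below are typical but arbitrary choices.
def _example_build_efficientdet_d0_extractor(conv_hyperparams):
  """Builds an EfficientDet-D0 feature extractor with common settings."""
  return SSDEfficientNetB0BiFPNKerasFeatureExtractor(
      is_training=True,
      depth_multiplier=1.0,
      min_depth=16,
      pad_to_multiple=32,
      conv_hyperparams=conv_hyperparams,
      freeze_batchnorm=False,
      inplace_batchnorm_update=False)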
| 47,228 | 47.589506 | 96 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 1088])
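    # With first_stage_features_stride=16 and 'SAME' padding, a 299x299 input
    # yields ceil(299 / 16) = 19 spatial positions, and the proposal feature
    # map produced by the Inception-ResNet-v2 backbone has 1088 channels.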
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 28, 28, 1088])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1088])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 8, 8, 1536])
if __name__ == '__main__':
tf.test.main()
| 4,718 | 41.133929 | 102 | py |
models | models-master/research/object_detection/models/ssd_inception_v2_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for InceptionV2 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import inception_v2
class SSDInceptionV2FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using InceptionV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""InceptionV2 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: If `override_base_feature_extractor_hyperparams` is False.
"""
super(SSDInceptionV2FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
if not self._override_base_feature_extractor_hyperparams:
      raise ValueError('SSD Inception V2 feature extractor always uses the '
                       'scope returned by `conv_hyperparams_fn` for both the '
                       'base feature extractor and the additional layers '
                       'added, since there is no arg_scope defined for the '
                       'base feature extractor.')
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
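  # Example of the mapping above: pixel value 0 maps to -1.0, 127.5 maps to
  # 0.0, and 255 maps to 1.0.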
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''
][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
_, image_features = inception_v2.inception_v2_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Mixed_5c',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
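# Note on the feature map layout used above: 'Mixed_4c' and 'Mixed_5c' are
# reused directly from the InceptionV2 backbone, while each empty 'from_layer'
# entry asks feature_map_generators.multi_resolution_feature_maps to append a
# new feature map of the corresponding 'layer_depth' (512, 256, 256, 128) on
# top, each halving the spatial resolution with a stride-2 convolution that is
# preceded by a 1x1 convolution because insert_1x1_conv=True.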
| 5,578 | 39.722628 | 80 | py |
models | models-master/research/object_detection/models/ssd_inception_v3_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.ssd_inception_v3_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_inception_v3_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdInceptionV3FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=True):
"""Constructs a SsdInceptionV3FeatureExtractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
Returns:
      an ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor.
"""
min_depth = 32
return ssd_inception_v3_feature_extractor.SSDInceptionV3FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=True)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768),
(2, 2, 2, 2048), (2, 1, 1, 512),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768),
(2, 2, 2, 2048), (2, 1, 1, 512),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 35, 35, 288), (2, 17, 17, 768),
(2, 8, 8, 2048), (2, 4, 4, 512),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 35, 35, 128), (2, 17, 17, 128),
(2, 8, 8, 192), (2, 4, 4, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 37, 37, 288), (2, 18, 18, 768),
(2, 8, 8, 2048), (2, 4, 4, 512),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'InceptionV3'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 13, 13, 288), (2, 6, 6, 768),
(2, 2, 2, 2048), (2, 1, 1, 512)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, num_layers=4)
if __name__ == '__main__':
tf.test.main()
| 6,664 | 40.397516 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 FPN feature extractors."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor_testbase
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet50V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet50v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=True):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet50V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet50V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet50V1_FPN'
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet101V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet101v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet101V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet101V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet101V1_FPN'
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDResnet152V1FeatureExtractorTest(
ssd_resnet_v1_fpn_feature_extractor_testbase.
SSDResnetFPNFeatureExtractorTestBase):
"""SSDResnet152v1Fpn feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, min_depth=32,
use_keras=False):
is_training = True
return (ssd_resnet_v1_fpn_keras_feature_extractor.
SSDResNet152V1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
name='ResNet152V1_FPN'))
def _resnet_scope_name(self):
return 'ResNet152V1_FPN'
if __name__ == '__main__':
tf.test.main()
| 4,210 | 39.490385 | 80 | py |
models | models-master/research/object_detection/models/feature_map_generators_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature map generators."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import feature_map_generators
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
INCEPTION_V2_LAYOUT = {
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 256],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'layer_target_norm': [20.0, -1, -1, -1, -1, -1],
}
INCEPTION_V3_LAYOUT = {
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128],
'anchor_strides': [16, 32, 64, -1, -1, -1],
'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3]
}
EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, -1, 512, 256, 256],
'conv_kernel_size': [-1, -1, 3, 3, 2],
}
SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
'from_layer': ['Conv2d_13_pointwise', '', '', ''],
'layer_depth': [-1, 256, 256, 256],
}
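# In the layout dictionaries above, each 'from_layer' entry names a backbone
# endpoint to reuse directly, while an empty string requests a new feature map
# generated on top of the last endpoint; 'layer_depth' gives the depth of each
# generated map, with -1 meaning the reused endpoint keeps its own depth (see
# the expected shapes in the tests below, e.g. Mixed_5c stays at 1024 channels
# while the generated maps get depths 512, 256 and 256).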
class MultiResolutionFeatureMapGeneratorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_map_generator(self, feature_map_layout,
pool_residual=False):
if tf_version.is_tf2():
return feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
freeze_batchnorm=False,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
name='FeatureMaps'
)
else:
def feature_map_generator(image_features):
return feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=1,
min_depth=32,
insert_1x1_conv=True,
image_features=image_features,
pool_residual=pool_residual)
return feature_map_generator
def test_get_expected_feature_map_shapes_with_inception_v2(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT)
def graph_fn():
feature_maps = feature_map_generator(image_features)
return feature_maps
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_depthwise'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_use_explicit_padding(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_explicit_padding'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_3c': (4, 28, 28, 256),
'Mixed_4c': (4, 14, 14, 576),
'Mixed_5c': (4, 7, 7, 1024),
'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_inception_v3(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V3_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Mixed_5d': (4, 35, 35, 256),
'Mixed_6e': (4, 17, 17, 576),
'Mixed_7c': (4, 8, 8, 1024),
'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
dtype=tf.float32),
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_11_pointwise': (4, 16, 16, 512),
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
dtype=tf.float32),
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
pool_residual=True
)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'Conv2d_13_pointwise': (4, 8, 8, 1024),
'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names_with_inception_v2(self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=INCEPTION_V2_LAYOUT,
)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'Mixed_5c_1_Conv2d_3_1x1_256/weights',
'Mixed_5c_1_Conv2d_3_1x1_256/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
'Mixed_5c_1_Conv2d_4_1x1_128/weights',
'Mixed_5c_1_Conv2d_4_1x1_128/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
'Mixed_5c_1_Conv2d_5_1x1_128/weights',
'Mixed_5c_1_Conv2d_5_1x1_128/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
])
expected_keras_variables = set([
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
def test_get_expected_variable_names_with_inception_v2_use_depthwise(
self):
with test_utils.GraphContextOrNone() as g:
image_features = {
'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
}
layout_copy = INCEPTION_V2_LAYOUT.copy()
layout_copy['use_depthwise'] = True
feature_map_generator = self._build_feature_map_generator(
feature_map_layout=layout_copy,
)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'Mixed_5c_1_Conv2d_3_1x1_256/weights',
'Mixed_5c_1_Conv2d_3_1x1_256/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
'Mixed_5c_1_Conv2d_4_1x1_128/weights',
'Mixed_5c_1_Conv2d_4_1x1_128/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
'Mixed_5c_1_Conv2d_5_1x1_128/weights',
'Mixed_5c_1_Conv2d_5_1x1_128/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
])
expected_keras_variables = set([
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
'depthwise_kernel'),
('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
'bias'),
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
@parameterized.parameters({'use_native_resize_op': True},
{'use_native_resize_op': False})
class FPNFeatureMapGeneratorTest(test_case.TestCase, parameterized.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _build_feature_map_generator(
self, image_features, depth, use_bounded_activations=False,
use_native_resize_op=False, use_explicit_padding=False,
use_depthwise=False):
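    """Builds a Keras-based (TF2) or slim-based (TF1) FPN top-down generator."""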
if tf_version.is_tf2():
return feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=len(image_features),
depth=depth,
is_training=True,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding,
use_bounded_activations=use_bounded_activations,
use_native_resize_op=use_native_resize_op,
scope=None,
name='FeatureMaps',
)
else:
def feature_map_generator(image_features):
return feature_map_generators.fpn_top_down_feature_maps(
image_features=image_features,
depth=depth,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding,
use_bounded_activations=use_bounded_activations,
use_native_resize_op=use_native_resize_op)
return feature_map_generator
def test_get_expected_feature_map_shapes(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_feature_map_shapes_with_explicit_padding(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_explicit_padding=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
def test_use_bounded_activations_add_operations(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [('block2',
tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3',
tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4',
tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5',
tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_bounded_activations=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_added_operations = dict.fromkeys([
'top_down/clip_by_value', 'top_down/clip_by_value_1',
'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
'top_down/clip_by_value_6'
])
op_names = {op.name: None for op in g.get_operations()}
self.assertDictContainsSubset(expected_added_operations, op_names)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
def test_use_bounded_activations_clip_value(
self, use_native_resize_op):
tf_graph = tf.Graph()
with tf_graph.as_default():
image_features = [
('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)),
('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)),
('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)),
('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_bounded_activations=True,
use_native_resize_op=use_native_resize_op)
feature_map_generator(image_features)
expected_clip_by_value_ops = [
'top_down/clip_by_value', 'top_down/clip_by_value_1',
'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
'top_down/clip_by_value_6'
]
# Gathers activation tensors before and after clip_by_value operations.
activations = {}
for clip_by_value_op in expected_clip_by_value_ops:
clip_input_tensor = tf_graph.get_operation_by_name(
'{}/Minimum'.format(clip_by_value_op)).inputs[0]
clip_output_tensor = tf_graph.get_tensor_by_name(
'{}:0'.format(clip_by_value_op))
activations.update({
'before_{}'.format(clip_by_value_op): clip_input_tensor,
'after_{}'.format(clip_by_value_op): clip_output_tensor,
})
expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND
expected_upper_bound = feature_map_generators.ACTIVATION_BOUND
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
activations_output = session.run(activations)
for clip_by_value_op in expected_clip_by_value_ops:
        # Before clipping, activations are beyond the expected bound because
        # of the large input image_features values.
activations_before_clipping = (
activations_output['before_{}'.format(clip_by_value_op)])
before_clipping_lower_bound = np.amin(activations_before_clipping)
before_clipping_upper_bound = np.amax(activations_before_clipping)
self.assertLessEqual(before_clipping_lower_bound,
expected_lower_bound)
self.assertGreaterEqual(before_clipping_upper_bound,
expected_upper_bound)
        # After clipping, activations are bounded as expected.
activations_after_clipping = (
activations_output['after_{}'.format(clip_by_value_op)])
after_clipping_lower_bound = np.amin(activations_after_clipping)
after_clipping_upper_bound = np.amax(activations_after_clipping)
self.assertGreaterEqual(after_clipping_lower_bound,
expected_lower_bound)
self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound)
def test_get_expected_feature_map_shapes_with_depthwise(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_depthwise=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
expected_feature_map_shapes = {
'top_down_block2': (4, 8, 8, 128),
'top_down_block3': (4, 4, 4, 128),
'top_down_block4': (4, 2, 2, 128),
'top_down_block5': (4, 1, 1, 128)
}
out_feature_maps = self.execute(graph_fn, [], g)
out_feature_map_shapes = dict(
(key, value.shape) for key, value in out_feature_maps.items())
self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
def test_get_expected_variable_names(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'projection_1/weights',
'projection_1/biases',
'projection_2/weights',
'projection_2/biases',
'projection_3/weights',
'projection_3/biases',
'projection_4/weights',
'projection_4/biases',
'smoothing_1/weights',
'smoothing_1/biases',
'smoothing_2/weights',
'smoothing_2/biases',
'smoothing_3/weights',
'smoothing_3/biases',
])
expected_keras_variables = set([
'FeatureMaps/top_down/projection_1/kernel',
'FeatureMaps/top_down/projection_1/bias',
'FeatureMaps/top_down/projection_2/kernel',
'FeatureMaps/top_down/projection_2/bias',
'FeatureMaps/top_down/projection_3/kernel',
'FeatureMaps/top_down/projection_3/bias',
'FeatureMaps/top_down/projection_4/kernel',
'FeatureMaps/top_down/projection_4/bias',
'FeatureMaps/top_down/smoothing_1_conv/kernel',
'FeatureMaps/top_down/smoothing_1_conv/bias',
'FeatureMaps/top_down/smoothing_2_conv/kernel',
'FeatureMaps/top_down/smoothing_2_conv/bias',
'FeatureMaps/top_down/smoothing_3_conv/kernel',
'FeatureMaps/top_down/smoothing_3_conv/bias'
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
def test_get_expected_variable_names_with_depthwise(
self, use_native_resize_op):
with test_utils.GraphContextOrNone() as g:
image_features = [
('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
]
feature_map_generator = self._build_feature_map_generator(
image_features=image_features,
depth=128,
use_depthwise=True,
use_native_resize_op=use_native_resize_op)
def graph_fn():
return feature_map_generator(image_features)
self.execute(graph_fn, [], g)
expected_slim_variables = set([
'projection_1/weights',
'projection_1/biases',
'projection_2/weights',
'projection_2/biases',
'projection_3/weights',
'projection_3/biases',
'projection_4/weights',
'projection_4/biases',
'smoothing_1/depthwise_weights',
'smoothing_1/pointwise_weights',
'smoothing_1/biases',
'smoothing_2/depthwise_weights',
'smoothing_2/pointwise_weights',
'smoothing_2/biases',
'smoothing_3/depthwise_weights',
'smoothing_3/pointwise_weights',
'smoothing_3/biases',
])
expected_keras_variables = set([
'FeatureMaps/top_down/projection_1/kernel',
'FeatureMaps/top_down/projection_1/bias',
'FeatureMaps/top_down/projection_2/kernel',
'FeatureMaps/top_down/projection_2/bias',
'FeatureMaps/top_down/projection_3/kernel',
'FeatureMaps/top_down/projection_3/bias',
'FeatureMaps/top_down/projection_4/kernel',
'FeatureMaps/top_down/projection_4/bias',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel',
'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias'
])
if tf_version.is_tf2():
actual_variable_set = set(
[var.name.split(':')[0] for var in feature_map_generator.variables])
self.assertSetEqual(expected_keras_variables, actual_variable_set)
else:
with g.as_default():
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
self.assertSetEqual(expected_slim_variables, actual_variable_set)
class GetDepthFunctionTest(tf.test.TestCase):
def test_return_min_depth_when_multiplier_is_small(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
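    # 16 * 0.5 = 8 falls below min_depth=16, so the depth is clamped to 16.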
self.assertEqual(depth_fn(16), 16)
def test_return_correct_depth_with_multiplier(self):
depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
min_depth=16)
self.assertEqual(depth_fn(64), 32)
@parameterized.parameters(
{'replace_pool_with_conv': False},
{'replace_pool_with_conv': True},
)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):
def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'MaxPool2d_0_2x2': (4, 10, 10, 1024),
'MaxPool2d_1_2x2': (4, 5, 5, 1024),
'MaxPool2d_2_2x2': (4, 3, 3, 1024),
'MaxPool2d_3_2x2': (4, 2, 2, 1024),
'MaxPool2d_4_2x2': (4, 1, 1, 1024),
}
expected_conv_feature_map_shapes = {
'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024),
'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024),
'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024),
'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024),
'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024),
}
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
out_feature_maps = sess.run(feature_maps)
out_feature_map_shapes = {key: value.shape
for key, value in out_feature_maps.items()}
if replace_pool_with_conv:
self.assertDictEqual(expected_conv_feature_map_shapes,
out_feature_map_shapes)
else:
self.assertDictEqual(expected_pool_feature_map_shapes,
out_feature_map_shapes)
def test_get_expected_variable_names(self, replace_pool_with_conv):
image_features = {
'image_features': tf.random_uniform([4, 19, 19, 1024])
}
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=1024,
num_layers=6,
image_features=image_features,
replace_pool_with_conv=replace_pool_with_conv)
expected_pool_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
])
expected_conv_variables = set([
'Base_Conv2d_1x1_1024/weights',
'Base_Conv2d_1x1_1024/biases',
'Conv2d_0_3x3_s2_1024/weights',
'Conv2d_0_3x3_s2_1024/biases',
'Conv2d_1_3x3_s2_1024/weights',
'Conv2d_1_3x3_s2_1024/biases',
'Conv2d_2_3x3_s2_1024/weights',
'Conv2d_2_3x3_s2_1024/biases',
'Conv2d_3_3x3_s2_1024/weights',
'Conv2d_3_3x3_s2_1024/biases',
'Conv2d_4_3x3_s2_1024/weights',
'Conv2d_4_3x3_s2_1024/biases',
])
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
sess.run(feature_maps)
actual_variable_set = set(
[var.op.name for var in tf.trainable_variables()])
if replace_pool_with_conv:
self.assertSetEqual(expected_conv_variables, actual_variable_set)
else:
self.assertSetEqual(expected_pool_variables, actual_variable_set)
if __name__ == '__main__':
tf.test.main()
# models-master/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self,
first_stage_features_stride,
activation_fn=tf.nn.relu,
architecture='resnet_v1_101'):
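    """Builds a Faster R-CNN feature extractor for the given ResNet variant."""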
feature_extractor_map = {
'resnet_v1_50':
faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
'resnet_v1_101':
faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
'resnet_v1_152':
faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor
}
return feature_extractor_map[architecture](
is_training=False,
first_stage_features_stride=first_stage_features_stride,
activation_fn=activation_fn,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']:
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16, architecture=architecture)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 14, 14, 1024])
def test_extract_proposal_features_stride_eight(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=8)
preprocessed_inputs = tf.random_uniform(
[4, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [4, 28, 28, 1024])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 1024])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_on_very_small_images(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(
features_shape,
feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)})
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [3, 7, 7, 2048])
def test_overwriting_activation_fn(self):
for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']:
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16,
architecture=architecture,
activation_fn=tf.nn.relu6)
preprocessed_inputs = tf.random_uniform([4, 224, 224, 3],
maxval=255,
dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestStage1Scope')
_ = feature_extractor.extract_box_classifier_features(
rpn_feature_map, scope='TestStaget2Scope')
conv_ops = [
op for op in tf.get_default_graph().get_operations()
if op.type == 'Relu6'
]
op_names = [op.name for op in conv_ops]
self.assertIsNotNone(conv_ops)
self.assertIn('TestStage1Scope/resnet_v1_50/resnet_v1_50/conv1/Relu6',
op_names)
self.assertIn(
'TestStaget2Scope/resnet_v1_50/block4/unit_1/bottleneck_v1/conv1/Relu6',
op_names)
if __name__ == '__main__':
tf.test.main()
# models-master/research/object_detection/models/ssd_spaghettinet_feature_extractor_tf1_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_spaghettinet_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_spaghettinet_feature_extractor
from object_detection.utils import tf_version
try:
from tensorflow.contrib import quantize as contrib_quantize # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDSpaghettiNetFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, arch_name, is_training=True):
return ssd_spaghettinet_feature_extractor.SSDSpaghettinetFeatureExtractor(
is_training=is_training,
spaghettinet_arch_name=arch_name,
depth_multiplier=1.0,
min_depth=4,
pad_to_multiple=1,
conv_hyperparams_fn=self.conv_hyperparams_fn)
def _test_spaghettinet_returns_correct_shapes(self, arch_name,
expected_feature_map_shapes):
image = tf.random.normal((1, 320, 320, 3))
feature_extractor = self._create_feature_extractor(arch_name)
feature_maps = feature_extractor.extract_features(image)
self.assertEqual(len(expected_feature_map_shapes), len(feature_maps))
for expected_shape, x in zip(expected_feature_map_shapes, feature_maps):
self.assertTrue(x.shape.is_compatible_with(expected_shape))
def test_spaghettinet_edgetpu_s(self):
expected_feature_map_shapes = [(1, 20, 20, 120), (1, 10, 10, 168),
(1, 5, 5, 136), (1, 3, 3, 136),
(1, 3, 3, 64)]
self._test_spaghettinet_returns_correct_shapes('spaghettinet_edgetpu_s',
expected_feature_map_shapes)
def test_spaghettinet_edgetpu_m(self):
expected_feature_map_shapes = [(1, 20, 20, 120), (1, 10, 10, 168),
(1, 5, 5, 136), (1, 3, 3, 136),
(1, 3, 3, 64)]
self._test_spaghettinet_returns_correct_shapes('spaghettinet_edgetpu_m',
expected_feature_map_shapes)
def test_spaghettinet_edgetpu_l(self):
expected_feature_map_shapes = [(1, 20, 20, 120), (1, 10, 10, 168),
(1, 5, 5, 112), (1, 3, 3, 128),
(1, 3, 3, 64)]
self._test_spaghettinet_returns_correct_shapes('spaghettinet_edgetpu_l',
expected_feature_map_shapes)
def _check_quantization(self, model_fn):
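    """Builds quantized train and eval graphs and round-trips a checkpoint."""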
checkpoint_dir = self.get_temp_dir()
with tf.Graph().as_default() as training_graph:
model_fn(is_training=True)
contrib_quantize.experimental_create_training_graph(training_graph)
with self.session(graph=training_graph) as sess:
sess.run(tf.global_variables_initializer())
tf.train.Saver().save(sess, checkpoint_dir)
with tf.Graph().as_default() as eval_graph:
model_fn(is_training=False)
contrib_quantize.experimental_create_eval_graph(eval_graph)
with self.session(graph=eval_graph) as sess:
tf.train.Saver().restore(sess, checkpoint_dir)
def _test_spaghettinet_quantization(self, arch_name):
def model_fn(is_training):
image = tf.random.normal((1, 320, 320, 3))
feature_extractor = self._create_feature_extractor(
arch_name, is_training=is_training)
feature_extractor.extract_features(image)
self._check_quantization(model_fn)
def test_spaghettinet_edgetpu_s_quantization(self):
self._test_spaghettinet_quantization('spaghettinet_edgetpu_s')
def test_spaghettinet_edgetpu_m_quantization(self):
self._test_spaghettinet_quantization('spaghettinet_edgetpu_m')
def test_spaghettinet_edgetpu_l_quantization(self):
self._test_spaghettinet_quantization('spaghettinet_edgetpu_l')
if __name__ == '__main__':
tf.test.main()
# models-master/research/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing mobilenet_v2 feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_mobilenet_v2_feature_extractor
from object_detection.models.keras_models import mobilenet_v2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMobileNetV2FeatureExtractorTest(test_case.TestCase):
def test_center_net_mobilenet_v2_feature_extractor(self):
net = mobilenet_v2.mobilenet_v2(True, include_top=False)
model = center_net_mobilenet_v2_feature_extractor.CenterNetMobileNetV2FeatureExtractor(
net)
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
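    # MobileNetV2-based CenterNet features have output stride 4, so 224x224
    # inputs yield 56x56 feature maps with 64 channels.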
self.assertEqual(outputs.shape, (8, 56, 56, 64))
if __name__ == '__main__':
tf.test.main()
# models-master/research/object_detection/models/ssd_mobilenet_v3_feature_extractor.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobileNetV3 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v3
class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor):
"""Base class of SSD feature extractor using MobilenetV3 features."""
def __init__(self,
conv_defs,
from_layer,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobilenetV3'):
"""MobileNetV3 Feature Extractor for SSD Models.
MobileNet v3. Details found in:
https://arxiv.org/abs/1905.02244
Args:
conv_defs: MobileNetV3 conv defs for backbone.
      from_layer: A list of two layer names (strings) to connect to the 1st and
2nd inputs of the SSD head.
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
scope_name: scope name (string) of network variables.
"""
super(SSDMobileNetV3FeatureExtractorBase, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams
)
self._conv_defs = conv_defs
self._from_layer = from_layer
self._scope_name = scope_name
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError if conv_defs is not provided or from_layer does not meet the
size requirement.
"""
if not self._conv_defs:
raise ValueError('Must provide backbone conv defs.')
if len(self._from_layer) != 2:
raise ValueError('SSD input feature names are not provided.')
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': [
self._from_layer[0], self._from_layer[1], '', '', '', ''
],
'layer_depth': [-1, -1, 512, 256, 256, 128],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
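    # A layer_depth of -1 keeps the named backbone layer as-is; the trailing
    # entries with empty 'from_layer' add new convolutional maps of the listed
    # depths on top of the backbone.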
with tf.variable_scope(
self._scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \
slim.arg_scope(
[mobilenet.depth_multiplier], min_depth=self._min_depth):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = mobilenet_v3.mobilenet_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
conv_defs=self._conv_defs,
final_endpoint=self._from_layer[1],
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase):
"""Mobilenet V3-Large feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobilenetV3'):
super(SSDMobileNetV3LargeFeatureExtractor, self).__init__(
conv_defs=mobilenet_v3.V3_LARGE_DETECTION,
from_layer=['layer_14/expansion_output', 'layer_17'],
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name
)
class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase):
"""Mobilenet V3-Small feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobilenetV3'):
super(SSDMobileNetV3SmallFeatureExtractor, self).__init__(
conv_defs=mobilenet_v3.V3_SMALL_DETECTION,
from_layer=['layer_10/expansion_output', 'layer_13'],
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name
)
class SSDMobileNetV3SmallPrunedFeatureExtractor(
SSDMobileNetV3FeatureExtractorBase):
"""Mobilenet V3-Small feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobilenetV3'):
super(SSDMobileNetV3SmallPrunedFeatureExtractor, self).__init__(
conv_defs=mobilenet_v3.V3_SMALL_PRUNED_DETECTION,
from_layer=['layer_9/expansion_output', 'layer_12'],
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name)
# models-master/research/object_detection/models/ssd_mobiledet_feature_extractor.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobileDet features."""
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from object_detection.utils import shape_utils
BACKBONE_WEIGHT_DECAY = 4e-5
def _scale_filters(filters, multiplier, base=8):
"""Scale the filters accordingly to (multiplier, base)."""
round_half_up = int(int(filters) * multiplier / base + 0.5)
result = int(round_half_up * base)
return max(result, base)
def _swish6(h):
with tf.name_scope('swish6'):
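    # The "hard" swish used by MobileNetV3: h * relu6(h + 3) / 6, a
    # quantization-friendly approximation of the swish activation.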
return h * tf.nn.relu6(h + np.float32(3)) * np.float32(1. / 6.)
def _conv(h, filters, kernel_size, strides=1,
normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6):
if activation_fn is None:
raise ValueError('Activation function cannot be None. Use tf.identity '
'instead to better support quantized training.')
return slim.conv2d(
h,
filters,
kernel_size,
stride=strides,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weights_initializer=tf.initializers.he_normal(),
weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY),
padding='SAME')
def _separable_conv(
h, filters, kernel_size, strides=1, activation_fn=tf.nn.relu6):
"""Separable convolution layer."""
if activation_fn is None:
raise ValueError('Activation function cannot be None. Use tf.identity '
'instead to better support quantized training.')
# Depthwise variant of He initialization derived under the principle proposed
# in the original paper. Note the original He normalization was designed for
# full convolutions and calling tf.initializers.he_normal() can over-estimate
# the fan-in of a depthwise kernel by orders of magnitude.
stddev = (2.0 / kernel_size**2)**0.5 / .87962566103423978
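  # E.g. for a 3x3 kernel this is sqrt(2 / 9) / 0.8796 ~= 0.536.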
depthwise_initializer = tf.initializers.truncated_normal(stddev=stddev)
return slim.separable_conv2d(
h,
filters,
kernel_size,
stride=strides,
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm,
weights_initializer=depthwise_initializer,
pointwise_initializer=tf.initializers.he_normal(),
weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY),
padding='SAME')
def _squeeze_and_excite(h, hidden_dim, activation_fn=tf.nn.relu6):
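  """Squeeze-and-excite: global average pool, 1x1 bottleneck, sigmoid gate."""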
with tf.variable_scope(None, default_name='SqueezeExcite'):
height, width = h.shape[1], h.shape[2]
u = slim.avg_pool2d(h, [height, width], stride=1, padding='VALID')
u = _conv(u, hidden_dim, 1,
normalizer_fn=None, activation_fn=activation_fn)
u = _conv(u, h.shape[-1], 1,
normalizer_fn=None, activation_fn=tf.nn.sigmoid)
return u * h
def _inverted_bottleneck_no_expansion(
h, filters, activation_fn=tf.nn.relu6,
kernel_size=3, strides=1, use_se=False):
"""Inverted bottleneck layer without the first 1x1 expansion convolution."""
with tf.variable_scope(None, default_name='IBNNoExpansion'):
# Setting filters to None will make _separable_conv a depthwise conv.
h = _separable_conv(
h, None, kernel_size, strides=strides, activation_fn=activation_fn)
if use_se:
hidden_dim = _scale_filters(h.shape[-1], 0.25)
h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn)
h = _conv(h, filters, 1, activation_fn=tf.identity)
return h
def _inverted_bottleneck(
h, filters, activation_fn=tf.nn.relu6,
kernel_size=3, expansion=8, strides=1, use_se=False, residual=True):
"""Inverted bottleneck layer."""
with tf.variable_scope(None, default_name='IBN'):
shortcut = h
expanded_filters = int(h.shape[-1]) * expansion
if expansion <= 1:
raise ValueError('Expansion factor must be greater than 1.')
h = _conv(h, expanded_filters, 1, activation_fn=activation_fn)
# Setting filters to None will make _separable_conv a depthwise conv.
h = _separable_conv(h, None, kernel_size, strides=strides,
activation_fn=activation_fn)
if use_se:
hidden_dim = _scale_filters(expanded_filters, 0.25)
h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn)
h = _conv(h, filters, 1, activation_fn=tf.identity)
if residual:
h = h + shortcut
return h
def _fused_conv(
h, filters, activation_fn=tf.nn.relu6,
kernel_size=3, expansion=8, strides=1, use_se=False, residual=True):
"""Fused convolution layer."""
with tf.variable_scope(None, default_name='FusedConv'):
shortcut = h
expanded_filters = int(h.shape[-1]) * expansion
if expansion <= 1:
raise ValueError('Expansion factor must be greater than 1.')
h = _conv(h, expanded_filters, kernel_size, strides=strides,
activation_fn=activation_fn)
if use_se:
hidden_dim = _scale_filters(expanded_filters, 0.25)
h = _squeeze_and_excite(h, hidden_dim, activation_fn=activation_fn)
h = _conv(h, filters, 1, activation_fn=tf.identity)
if residual:
h = h + shortcut
return h
def _tucker_conv(
h, filters, activation_fn=tf.nn.relu6,
kernel_size=3, input_rank_ratio=0.25, output_rank_ratio=0.25,
strides=1, residual=True):
"""Tucker convolution layer (generalized bottleneck)."""
with tf.variable_scope(None, default_name='TuckerConv'):
shortcut = h
input_rank = _scale_filters(h.shape[-1], input_rank_ratio)
h = _conv(h, input_rank, 1, activation_fn=activation_fn)
output_rank = _scale_filters(filters, output_rank_ratio)
h = _conv(h, output_rank, kernel_size, strides=strides,
activation_fn=activation_fn)
h = _conv(h, filters, 1, activation_fn=tf.identity)
if residual:
h = h + shortcut
return h
def mobiledet_cpu_backbone(h, multiplier=1.0):
"""Build a MobileDet CPU backbone."""
def _scale(filters):
return _scale_filters(filters, multiplier)
ibn = functools.partial(
_inverted_bottleneck, use_se=True, activation_fn=_swish6)
endpoints = {}
h = _conv(h, _scale(16), 3, strides=2, activation_fn=_swish6)
h = _inverted_bottleneck_no_expansion(
h, _scale(8), use_se=True, activation_fn=_swish6)
endpoints['C1'] = h
h = ibn(h, _scale(16), expansion=4, strides=2, residual=False)
endpoints['C2'] = h
h = ibn(h, _scale(32), expansion=8, strides=2, residual=False)
h = ibn(h, _scale(32), expansion=4)
h = ibn(h, _scale(32), expansion=4)
h = ibn(h, _scale(32), expansion=4)
endpoints['C3'] = h
h = ibn(h, _scale(72), kernel_size=5, expansion=8, strides=2, residual=False)
h = ibn(h, _scale(72), expansion=8)
h = ibn(h, _scale(72), kernel_size=5, expansion=4)
h = ibn(h, _scale(72), expansion=4)
h = ibn(h, _scale(72), expansion=8, residual=False)
h = ibn(h, _scale(72), expansion=8)
h = ibn(h, _scale(72), expansion=8)
h = ibn(h, _scale(72), expansion=8)
endpoints['C4'] = h
h = ibn(h, _scale(104), kernel_size=5, expansion=8, strides=2, residual=False)
h = ibn(h, _scale(104), kernel_size=5, expansion=4)
h = ibn(h, _scale(104), kernel_size=5, expansion=4)
h = ibn(h, _scale(104), expansion=4)
h = ibn(h, _scale(144), expansion=8, residual=False)
endpoints['C5'] = h
return endpoints
def mobiledet_dsp_backbone(h, multiplier=1.0):
"""Build a MobileDet DSP backbone."""
def _scale(filters):
return _scale_filters(filters, multiplier)
ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6)
fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6)
tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6)
endpoints = {}
h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6)
h = _inverted_bottleneck_no_expansion(
h, _scale(24), activation_fn=tf.nn.relu6)
endpoints['C1'] = h
h = fused(h, _scale(32), expansion=4, strides=2, residual=False)
h = fused(h, _scale(32), expansion=4)
h = ibn(h, _scale(32), expansion=4)
h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.75)
endpoints['C2'] = h
h = fused(h, _scale(64), expansion=8, strides=2, residual=False)
h = ibn(h, _scale(64), expansion=4)
h = fused(h, _scale(64), expansion=4)
h = fused(h, _scale(64), expansion=4)
endpoints['C3'] = h
h = fused(h, _scale(120), expansion=8, strides=2, residual=False)
h = ibn(h, _scale(120), expansion=4)
h = ibn(h, _scale(120), expansion=8)
h = ibn(h, _scale(120), expansion=8)
h = fused(h, _scale(144), expansion=8, residual=False)
h = ibn(h, _scale(144), expansion=8)
h = ibn(h, _scale(144), expansion=8)
h = ibn(h, _scale(144), expansion=8)
endpoints['C4'] = h
h = ibn(h, _scale(160), expansion=4, strides=2, residual=False)
h = ibn(h, _scale(160), expansion=4)
h = fused(h, _scale(160), expansion=4)
h = tucker(h, _scale(160), input_rank_ratio=0.75, output_rank_ratio=0.75)
h = ibn(h, _scale(240), expansion=8, residual=False)
endpoints['C5'] = h
return endpoints
def mobiledet_edgetpu_backbone(h, multiplier=1.0):
"""Build a MobileDet EdgeTPU backbone."""
def _scale(filters):
return _scale_filters(filters, multiplier)
ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6)
fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6)
tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6)
endpoints = {}
h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6)
h = tucker(h, _scale(16),
input_rank_ratio=0.25, output_rank_ratio=0.75, residual=False)
endpoints['C1'] = h
h = fused(h, _scale(16), expansion=8, strides=2, residual=False)
h = fused(h, _scale(16), expansion=4)
h = fused(h, _scale(16), expansion=8)
h = fused(h, _scale(16), expansion=4)
endpoints['C2'] = h
h = fused(h, _scale(40), expansion=8, kernel_size=5, strides=2,
residual=False)
h = fused(h, _scale(40), expansion=4)
h = fused(h, _scale(40), expansion=4)
h = fused(h, _scale(40), expansion=4)
endpoints['C3'] = h
h = ibn(h, _scale(72), expansion=8, strides=2, residual=False)
h = ibn(h, _scale(72), expansion=8)
h = fused(h, _scale(72), expansion=4)
h = fused(h, _scale(72), expansion=4)
h = ibn(h, _scale(96), expansion=8, kernel_size=5, residual=False)
h = ibn(h, _scale(96), expansion=8, kernel_size=5)
h = ibn(h, _scale(96), expansion=8)
h = ibn(h, _scale(96), expansion=8)
endpoints['C4'] = h
h = ibn(h, _scale(120), expansion=8, kernel_size=5, strides=2, residual=False)
h = ibn(h, _scale(120), expansion=8)
h = ibn(h, _scale(120), expansion=4, kernel_size=5)
h = ibn(h, _scale(120), expansion=8)
h = ibn(h, _scale(384), expansion=8, kernel_size=5, residual=False)
endpoints['C5'] = h
return endpoints
def mobiledet_gpu_backbone(h, multiplier=1.0):
"""Build a MobileDet GPU backbone."""
def _scale(filters):
return _scale_filters(filters, multiplier)
ibn = functools.partial(_inverted_bottleneck, activation_fn=tf.nn.relu6)
fused = functools.partial(_fused_conv, activation_fn=tf.nn.relu6)
tucker = functools.partial(_tucker_conv, activation_fn=tf.nn.relu6)
endpoints = {}
# block 0
h = _conv(h, _scale(32), 3, strides=2, activation_fn=tf.nn.relu6)
# block 1
h = tucker(
h,
_scale(16),
input_rank_ratio=0.25,
output_rank_ratio=0.25,
residual=False)
endpoints['C1'] = h
# block 2
h = fused(h, _scale(32), expansion=8, strides=2, residual=False)
h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25)
h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25)
h = tucker(h, _scale(32), input_rank_ratio=0.25, output_rank_ratio=0.25)
endpoints['C2'] = h
# block 3
h = fused(
h, _scale(64), expansion=8, kernel_size=3, strides=2, residual=False)
h = fused(h, _scale(64), expansion=8)
h = fused(h, _scale(64), expansion=8)
h = fused(h, _scale(64), expansion=4)
endpoints['C3'] = h
# block 4
h = fused(
h, _scale(128), expansion=8, kernel_size=3, strides=2, residual=False)
h = fused(h, _scale(128), expansion=4)
h = fused(h, _scale(128), expansion=4)
h = fused(h, _scale(128), expansion=4)
# block 5
h = fused(
h, _scale(128), expansion=8, kernel_size=3, strides=1, residual=False)
h = fused(h, _scale(128), expansion=8)
h = fused(h, _scale(128), expansion=8)
h = fused(h, _scale(128), expansion=8)
endpoints['C4'] = h
# block 6
h = fused(
h, _scale(128), expansion=4, kernel_size=3, strides=2, residual=False)
h = fused(h, _scale(128), expansion=4)
h = fused(h, _scale(128), expansion=4)
h = fused(h, _scale(128), expansion=4)
# block 7
h = ibn(h, _scale(384), expansion=8, kernel_size=3, strides=1, residual=False)
endpoints['C5'] = h
return endpoints
class SSDMobileDetFeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor):
"""Base class of SSD feature extractor using MobileDet features."""
def __init__(self,
backbone_fn,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobileDet'):
"""MobileDet Feature Extractor for SSD Models.
Reference:
https://arxiv.org/abs/2004.14525
Args:
backbone_fn: function to construct the MobileDet backbone.
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: Integer, minimum feature extractor depth (number of filters).
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features.
use_depthwise: Whether to use depthwise convolutions in the SSD head.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
scope_name: scope name (string) of network variables.
"""
if use_explicit_padding:
raise NotImplementedError(
'Explicit padding is not yet supported in MobileDet backbones.')
super(SSDMobileDetFeatureExtractorBase, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams
)
self._backbone_fn = backbone_fn
self._scope_name = scope_name
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1]. The preprocessing assumes an input
value range of [0, 255].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
padded_inputs = ops.pad_to_multiple(
preprocessed_inputs, self._pad_to_multiple)
feature_map_layout = {
'from_layer': ['C4', 'C5', '', '', '', ''],
# Do not specify the layer depths (number of filters) for C4 and C5, as
# their values are determined based on the backbone.
'layer_depth': [-1, -1, 512, 256, 256, 128],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
with tf.variable_scope(self._scope_name, reuse=self._reuse_weights):
with slim.arg_scope([slim.batch_norm],
is_training=self._is_training,
epsilon=0.01, decay=0.99, center=True, scale=True):
endpoints = self._backbone_fn(
padded_inputs,
multiplier=self._depth_multiplier)
image_features = {'C4': endpoints['C4'], 'C5': endpoints['C5']}
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
class SSDMobileDetCPUFeatureExtractor(SSDMobileDetFeatureExtractorBase):
"""MobileDet-CPU feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobileDetCPU'):
super(SSDMobileDetCPUFeatureExtractor, self).__init__(
backbone_fn=mobiledet_cpu_backbone,
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name)
class SSDMobileDetDSPFeatureExtractor(SSDMobileDetFeatureExtractorBase):
"""MobileDet-DSP feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobileDetDSP'):
super(SSDMobileDetDSPFeatureExtractor, self).__init__(
backbone_fn=mobiledet_dsp_backbone,
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name)
class SSDMobileDetEdgeTPUFeatureExtractor(SSDMobileDetFeatureExtractorBase):
"""MobileDet-EdgeTPU feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobileDetEdgeTPU'):
super(SSDMobileDetEdgeTPUFeatureExtractor, self).__init__(
backbone_fn=mobiledet_edgetpu_backbone,
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name)
class SSDMobileDetGPUFeatureExtractor(SSDMobileDetFeatureExtractorBase):
"""MobileDet-GPU feature extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False,
scope_name='MobileDetGPU'):
super(SSDMobileDetGPUFeatureExtractor, self).__init__(
backbone_fn=mobiledet_gpu_backbone,
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
scope_name=scope_name)
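# Illustrative sketch (not part of the original file): constructing one of the
# concrete extractors above. `my_conv_hyperparams_fn` and `images` are
# placeholders (a user-supplied hyperparams arg_scope builder and a
# [batch, height, width, 3] float tensor in [0, 255], respectively).
#
#   extractor = SSDMobileDetCPUFeatureExtractor(
#       is_training=True, depth_multiplier=1.0, min_depth=16,
#       pad_to_multiple=1, conv_hyperparams_fn=my_conv_hyperparams_fn)
#   feature_maps = extractor.extract_features(extractor.preprocess(images))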
| 22,520 | 37.366269 | 96 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_keras_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception Resnet v2 Faster R-CNN implementation in Keras.
See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""
# Skip pylint for this file because it times out
# pylint: skip-file
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import inception_resnet_v2
from object_detection.utils import model_util
from object_detection.utils import variables_helper
class FasterRCNNInceptionResnetV2KerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Inception Resnet v2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2KerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self._variable_dict = {}
self.classification_backbone = None
def preprocess(self, resized_inputs):
"""Faster R-CNN with Inception Resnet v2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Inception Resnet v2 network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
proposal_features = self.classification_backbone.get_layer(
name='block17_20_ac').output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = inception_resnet_v2.inception_resnet_v2(
self._train_batch_norm,
output_stride=self._first_stage_features_stride,
align_feature_maps=True,
weight_decay=self._weight_decay,
weights=None,
include_top=False)
with tf.name_scope(name):
with tf.name_scope('InceptionResnetV2'):
proposal_feature_maps = self.classification_backbone.get_layer(
name='block17_20_ac').output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv_7b_ac').output
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
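# Illustrative sketch (not part of the original file): the two Keras sub-models
# built above are used in sequence by the Faster R-CNN meta-architecture.
# Tensor names below are placeholders for demonstration only.
#
#   extractor = FasterRCNNInceptionResnetV2KerasFeatureExtractor(
#       is_training=True, first_stage_features_stride=16)
#   rpn_model = extractor.get_proposal_feature_extractor_model('FirstStage')
#   rpn_features = rpn_model(extractor.preprocess(images))
#   box_model = extractor.get_box_classifier_feature_extractor_model(
#       'SecondStage')
#   box_features = box_model(cropped_proposal_features)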
| 6,316 | 38.48125 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
This TF2-only test exercises the Keras-based MobileNet V1 FPN feature extractor
used by SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=True):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
del use_keras
return (ssd_mobilenet_v1_fpn_keras_feature_extractor.
SSDMobileNetV1FpnKerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
use_depthwise=True,
name='MobilenetV1_FPN'))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
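    # With a 256x256 input, the five maps below (presumably FPN levels 3-7,
    # i.e. strides 8 through 128) are 256/8=32, 16, 8, 4 and 2 on each side.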
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
  def test_extract_features_returns_correct_shapes_320(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=True)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
| 7,748 | 42.05 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SSD Mobilenet V1 feature extractors.
This TF1-only test exercises the Slim-based MobileNet V1 feature extractor
used by SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
del use_keras
return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
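    # 128/16 = 8 and 128/32 = 4, so the first two maps are the stride-16 and
    # stride-32 MobileNet outputs; the remaining four come from the extra
    # SSD feature layers added on top of the backbone.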
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
    self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 151)
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=False)
if __name__ == '__main__':
tf.test.main()
| 9,609 | 34.201465 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV2 features."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v2
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SSDMobileNetV2KerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
name=None):
"""MobileNetV2 Feature Extractor for SSD Models.
    MobileNet v2, as described in "MobileNetV2: Inverted Residuals and Linear
    Bottlenecks" by Sandler et al. (https://arxiv.org/abs/1801.04381).
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor (Functions
as a width multiplier for the mobilenet_v2 network itself).
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the ones from
        `conv_hyperparams`.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV2KerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._feature_map_layout = {
'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', ''
][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_depthwise': self._use_depthwise,
'use_explicit_padding': self._use_explicit_padding,
}
self.classification_backbone = None
self.feature_map_generator = None
def build(self, input_shape):
full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False)
conv2d_11_pointwise = full_mobilenet_v2.get_layer(
name='block_13_expand_relu').output
conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output
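    # Note (added for clarity): 'block_13_expand_relu' is MobileNetV2's
    # stride-16 expansion output (exposed as 'layer_15/expansion_output') and
    # 'out_relu' is the final stride-32 feature map (exposed as 'layer_19').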
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v2.inputs,
outputs=[conv2d_11_pointwise, conv2d_13_pointwise])
self.feature_map_generator = (
feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_maps = self.feature_map_generator({
'layer_15/expansion_output': image_features[0],
'layer_19': image_features[1]})
return list(feature_maps.values())
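# Illustrative sketch (not part of the original file): the extractor is a Keras
# model, so it can be called directly once built. `hyperparams` stands in for a
# hyperparams_builder.KerasLayerHyperparams instance and `images` for a
# [batch, height, width, 3] float tensor in [0, 255].
#
#   extractor = SSDMobileNetV2KerasFeatureExtractor(
#       is_training=True, depth_multiplier=1.0, min_depth=16,
#       pad_to_multiple=1, conv_hyperparams=hyperparams,
#       freeze_batchnorm=False, inplace_batchnorm_update=False)
#   feature_maps = extractor(extractor.preprocess(images))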
| 6,981 | 40.559524 | 80 | py |
models | models-master/research/object_detection/models/ssd_efficientnet_bifpn_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ssd_efficientnet_bifpn_feature_extractor."""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_efficientnet_bifpn_feature_extractor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
def _count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([
tf.keras.backend.count_params(p) for p in model.trainable_weights]))
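# Illustrative sketch (not part of the original test): _count_params works on
# any Keras model, e.g.
#
#   model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
#   _count_params(model)                        # trainable only: 3*4 + 4 = 16
#   _count_params(model, trainable_only=False)  # all parameters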
@parameterized.parameters(
{'efficientdet_version': 'efficientdet-d0',
'efficientnet_version': 'efficientnet-b0',
'bifpn_num_iterations': 3,
'bifpn_num_filters': 64,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d1',
'efficientnet_version': 'efficientnet-b1',
'bifpn_num_iterations': 4,
'bifpn_num_filters': 88,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d2',
'efficientnet_version': 'efficientnet-b2',
'bifpn_num_iterations': 5,
'bifpn_num_filters': 112,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d3',
'efficientnet_version': 'efficientnet-b3',
'bifpn_num_iterations': 6,
'bifpn_num_filters': 160,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d4',
'efficientnet_version': 'efficientnet-b4',
'bifpn_num_iterations': 7,
'bifpn_num_filters': 224,
'bifpn_combine_method': 'fast_attention'},
{'efficientdet_version': 'efficientdet-d5',
'efficientnet_version': 'efficientnet-b5',
'bifpn_num_iterations': 7,
'bifpn_num_filters': 288,
'bifpn_combine_method': 'fast_attention'},
# efficientdet-d6 and efficientdet-d7 only differ in input size.
{'efficientdet_version': 'efficientdet-d6-d7',
'efficientnet_version': 'efficientnet-b6',
'bifpn_num_iterations': 8,
'bifpn_num_filters': 384,
'bifpn_combine_method': 'sum'})
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SSDEfficientNetBiFPNFeatureExtractorTest(
test_case.TestCase, parameterized.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
force_use_bias: true
activation: SWISH
regularizer {
l2_regularizer {
weight: 0.0004
}
}
initializer {
truncated_normal_initializer {
stddev: 0.03
mean: 0.0
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: true,
decay: 0.99,
epsilon: 0.001,
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_feature_extractor(self,
efficientnet_version='efficientnet-b0',
bifpn_num_iterations=3,
bifpn_num_filters=64,
bifpn_combine_method='fast_attention'):
"""Constructs a new EfficientNetBiFPN feature extractor."""
depth_multiplier = 1.0
pad_to_multiple = 1
min_depth = 16
return (ssd_efficientnet_bifpn_feature_extractor
.SSDEfficientNetBiFPNKerasFeatureExtractor(
is_training=True,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
bifpn_min_level=3,
bifpn_max_level=7,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method,
efficientnet_version=efficientnet_version))
def test_efficientdet_feature_extractor_shapes(self,
efficientdet_version,
efficientnet_version,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method):
feature_extractor = self._create_feature_extractor(
efficientnet_version=efficientnet_version,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method)
outputs = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32))
self.assertEqual(outputs[0].shape, (2, 32, 32, bifpn_num_filters))
self.assertEqual(outputs[1].shape, (2, 16, 16, bifpn_num_filters))
self.assertEqual(outputs[2].shape, (2, 8, 8, bifpn_num_filters))
self.assertEqual(outputs[3].shape, (2, 4, 4, bifpn_num_filters))
self.assertEqual(outputs[4].shape, (2, 2, 2, bifpn_num_filters))
def test_efficientdet_feature_extractor_params(self,
efficientdet_version,
efficientnet_version,
bifpn_num_iterations,
bifpn_num_filters,
bifpn_combine_method):
feature_extractor = self._create_feature_extractor(
efficientnet_version=efficientnet_version,
bifpn_num_iterations=bifpn_num_iterations,
bifpn_num_filters=bifpn_num_filters,
bifpn_combine_method=bifpn_combine_method)
_ = feature_extractor(np.zeros((2, 256, 256, 3), dtype=np.float32))
expected_params = {
'efficientdet-d0': 5484829,
'efficientdet-d1': 8185156,
'efficientdet-d2': 9818153,
'efficientdet-d3': 13792706,
'efficientdet-d4': 22691445,
'efficientdet-d5': 35795677,
'efficientdet-d6-d7': 53624512,
}
num_params = _count_params(feature_extractor)
self.assertEqual(expected_params[efficientdet_version], num_params)
if __name__ == '__main__':
tf.test.main()
| 7,390 | 40.061111 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_testbase.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class for ssd_mobilenet_edgetpu_feature_extractor."""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
class _SsdMobilenetEdgeTPUFeatureExtractorTestBase(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
"""Base class for MobilenetEdgeTPU tests."""
@abc.abstractmethod
def _get_input_sizes(self):
"""Return feature map sizes for the two inputs to SSD head."""
pass
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 8, 8, input_feature_sizes[0]),
                                  (2, 4, 4, input_feature_sizes[1]),
                                  (2, 2, 2, 512), (2, 1, 1, 256),
                                  (2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 19, 19, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
input_feature_sizes = self._get_input_sizes()
    expected_feature_map_shape = [(2, 20, 20, input_feature_sizes[0]),
                                  (2, 10, 10, input_feature_sizes[1]),
                                  (2, 5, 5, 512), (2, 3, 3, 256),
                                  (2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_has_fused_batchnorm(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=False)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
| 4,560 | 39.362832 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_nas_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_nas_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FasterRcnnNASFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_nas.FasterRCNNNASFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
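  # The shape expectations in the tests below follow from stride-16 feature
  # extraction: e.g. a 299x299 input yields a ceil(299/16) = 19x19 RPN feature
  # map, and 4032 is the final feature depth of the NASNet-A (large) backbone.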
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 4032])
def test_extract_proposal_features_input_size_224(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 14, 14, 4032])
def test_extract_proposal_features_input_size_112(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 4032])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 9, 9, 4032])
if __name__ == '__main__':
tf.test.main()
| 4,652 | 40.544643 | 82 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V2 Faster R-CNN implementation.
See "Rethinking the Inception Architecture for Computer Vision"
https://arxiv.org/abs/1512.00567
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import inception_v2
def _batch_norm_arg_scope(list_ops,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_scale=False,
train_batch_norm=False):
"""Slim arg scope for InceptionV2 batch norm."""
if use_batch_norm:
batch_norm_params = {
'is_training': train_batch_norm,
'scale': batch_norm_scale,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon
}
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
batch_norm_params = None
return slim.arg_scope(list_ops,
normalizer_fn=normalizer_fn,
normalizer_params=batch_norm_params)
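# Illustrative sketch (not part of the original file): the helper above wraps
# slim ops in a batch-norm arg scope, e.g.
#
#   with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
#                              batch_norm_scale=True, train_batch_norm=False):
#     net = slim.conv2d(inputs, 64, [3, 3], scope='example_conv')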
class FasterRCNNInceptionV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Inception V2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
super(FasterRCNNInceptionV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Inception V2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
_, activations = inception_v2.inception_v2_base(
preprocessed_inputs,
final_endpoint='Mixed_4e',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
return activations['Mixed_4e'], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
data_format = 'NHWC'
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
data_format=data_format):
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], concat_dim)
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3],
concat_dim)
with tf.variable_scope('Mixed_5c'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
proposal_classifier_features = tf.concat(
[branch_0, branch_1, branch_2, branch_3], concat_dim)
return proposal_classifier_features
| 10,626 | 40.838583 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_feature_extractor_tf2_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SSD Mobilenet V1 feature extractors.
This TF2-only test exercises the Keras-based MobileNet V1 feature extractor
used by SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_keras_feature_extractor
.SSDMobileNetV1KerasFeatureExtractor(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(
add_batch_norm=False),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
name='MobilenetV1'))
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=True)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier, pad_to_multiple, use_keras=True)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=True)
if __name__ == '__main__':
tf.test.main()
| 8,796 | 34.329317 | 80 | py |
models | models-master/research/object_detection/models/ssd_pnasnet_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for PNASNet features.
Based on PNASNet ImageNet model: https://arxiv.org/abs/1712.00559
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import variables_helper
try:
from nets.nasnet import pnasnet # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
"""Defines the default arg scope for the PNASNet Large for object detection.
This provides a small edit to switch batch norm training on and off.
Args:
is_batch_norm_training: Boolean indicating whether to train with batch norm.
Default is False.
Returns:
An `arg_scope` to use for the PNASNet Large Model.
"""
imagenet_scope = pnasnet.pnasnet_large_arg_scope()
with slim.arg_scope(imagenet_scope):
with slim.arg_scope([slim.batch_norm],
is_training=is_batch_norm_training) as sc:
return sc
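# Illustrative usage sketch (not part of the original file; it mirrors the
# extract_features method further below): the detection arg scope is entered
# before building the PNASNet backbone so that batch norm training can be
# toggled, e.g.
#
#   with slim.arg_scope(
#       pnasnet_large_arg_scope_for_detection(is_batch_norm_training=True)):
#     _, image_features = pnasnet.build_pnasnet_large(
#         images, num_classes=None, is_training=True,
#         final_endpoint='Cell_11')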
class SSDPNASNetFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using PNASNet features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""PNASNet Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_depthwise: Whether to use depthwise convolutions.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDPNASNetFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
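  # Worked example (illustrative): with pixel values in [0, 255], the map
  # (2.0 / 255.0) * x - 1.0 sends 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0,
  # i.e. inputs are rescaled to the [-1, 1] range described in the docstring.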
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
feature_map_layout = {
'from_layer': ['Cell_7', 'Cell_11', '', '', '', ''][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with slim.arg_scope(
pnasnet_large_arg_scope_for_detection(
is_batch_norm_training=self._is_training)):
with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d],
reuse=self._reuse_weights):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = pnasnet.build_pnasnet_large(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
num_classes=None,
is_training=self._is_training,
final_endpoint='Cell_11')
with tf.variable_scope('SSD_feature_maps', reuse=self._reuse_weights):
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
def restore_from_classification_checkpoint_fn(self, feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
ssd_meta_arch.SSDFeatureExtractor which does not work for PNASNet
checkpoints.
Args:
feature_extractor_scope: A scope name for the first stage feature
extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
if variable.op.name.startswith(feature_extractor_scope):
var_name = variable.op.name.replace(feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
return variables_to_restore
| 7,102 | 38.027473 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_nas_fpn_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_mnasfpn_feature_extractor as mnasfpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV2MnasFPNFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False):
min_depth = 16
is_training = True
fpn_num_filters = 48
return mnasfpn_feature_extractor.SSDMobileNetV2MnasFPNFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
additional_layer_depth=fpn_num_filters,
use_explicit_padding=use_explicit_padding)
def test_extract_features_returns_correct_shapes_320_256(self):
image_height = 320
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 32, 48), (2, 20, 16, 48),
(2, 10, 8, 48), (2, 5, 4, 48)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 16), (2, 16, 16, 16),
(2, 8, 8, 16), (2, 4, 4, 16)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 320
image_width = 320
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
if __name__ == '__main__':
tf.test.main()
| 3,704 | 41.586207 | 107 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_edgetpu_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor
from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetEdgeTPUFeatureExtractorTest(
ssd_mobilenet_edgetpu_feature_extractor_testbase
._SsdMobilenetEdgeTPUFeatureExtractorTestBase):
def _get_input_sizes(self):
"""Return first two input feature map sizes."""
return [384, 192]
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_keras=False):
"""Constructs a new MobileNetEdgeTPU feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (ssd_mobilenet_edgetpu_feature_extractor
.SSDMobileNetEdgeTPUFeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
if __name__ == '__main__':
tf.test.main()
| 2,565 | 37.878788 | 84 | py |
models | models-master/research/object_detection/models/faster_rcnn_resnet_keras_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_resnet_keras_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_res
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class FasterRcnnResnetKerasFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, architecture='resnet_v1_50'):
return frcnn_res.FasterRCNNResnet50KerasFeatureExtractor(
is_training=False,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 14, 14, 1024])
def test_extract_proposal_features_half_size_input(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map = feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
features_shape = tf.shape(rpn_feature_map)
self.assertAllEqual(features_shape.numpy(), [1, 7, 7, 1024])
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor()
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
feature_extractor.get_proposal_feature_extractor_model(
name='TestScope')(preprocessed_inputs)
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor()
proposal_feature_maps = tf.random_uniform(
[3, 7, 7, 1024], maxval=255, dtype=tf.float32)
model = feature_extractor.get_box_classifier_feature_extractor_model(
name='TestScope')
proposal_classifier_features = (
model(proposal_feature_maps))
features_shape = tf.shape(proposal_classifier_features)
# Note: due to a slight mismatch in slim and keras resnet definitions
# the output shape of the box classifier is slightly different compared to
# that of the slim implementation. The keras version is more `canonical`
# in that it more accurately reflects the original authors' implementation.
# TODO(jonathanhuang): make the output shape match that of the slim
# implementation by using atrous convolutions.
self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 3,698 | 44.666667 | 91 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_keras_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for Keras MobilenetV1 features."""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import mobilenet_v1
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SSDMobileNetV1KerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
name=None):
"""Keras MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDMobileNetV1KerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
self._feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
self.classification_backbone = None
self._feature_map_generator = None
def build(self, input_shape):
full_mobilenet_v1 = mobilenet_v1.mobilenet_v1(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
weights=None,
use_explicit_padding=self._use_explicit_padding,
alpha=self._depth_multiplier,
min_depth=self._min_depth,
include_top=False)
conv2d_11_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_11_relu').output
conv2d_13_pointwise = full_mobilenet_v1.get_layer(
name='conv_pw_13_relu').output
self.classification_backbone = tf.keras.Model(
inputs=full_mobilenet_v1.inputs,
outputs=[conv2d_11_pointwise, conv2d_13_pointwise])
self._feature_map_generator = (
feature_map_generators.KerasMultiResolutionFeatureMaps(
feature_map_layout=self._feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_maps = self._feature_map_generator({
'Conv2d_11_pointwise': image_features[0],
'Conv2d_13_pointwise': image_features[1]})
return list(feature_maps.values())
| 6,843 | 40.478788 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD MobilenetV2 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
# A modified config of mobilenet v2 that makes it more detection friendly.
def _create_modified_mobilenet_config():
conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF)
conv_defs['spec'][-1] = mobilenet.op(
slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256)
return conv_defs
class SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Mobilenet v2 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v2 layers
{layer_4, layer_7, layer_14, layer_19}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
      use_native_resize_op: Whether to use tf.image.resize_nearest_neighbor
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDMobileNetV2FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \
slim.arg_scope(
[mobilenet.depth_multiplier], min_depth=self._min_depth):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
_, image_features = mobilenet_v2.mobilenet_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='layer_19',
depth_multiplier=self._depth_multiplier,
conv_defs=self._conv_defs,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('fpn', reuse=self._reuse_weights):
feature_blocks = [
'layer_4', 'layer_7', 'layer_14', 'layer_19'
]
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append(feature_blocks[level - 2])
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
feature_blocks[base_fpn_max_level - 2])]
# Construct coarse features
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
if self._use_depthwise:
conv_op = functools.partial(
slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
if self._use_explicit_padding:
last_feature_map = ops.fixed_padding(
last_feature_map, kernel_size)
last_feature_map = conv_op(
last_feature_map,
num_outputs=depth_fn(self._additional_layer_depth),
kernel_size=[kernel_size, kernel_size],
stride=2,
padding=padding,
scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19))
feature_maps.append(last_feature_map)
return feature_maps
| 8,758 | 43.015075 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_mnasfpn_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD MobilenetV2 NAS-FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
Block = collections.namedtuple(
'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size'])
_MNASFPN_CELL_CONFIG = [
Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256),
Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128),
Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128),
Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128),
Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96),
]
MNASFPN_DEF = dict(
feature_levels=[3, 4, 5, 6],
spec=[_MNASFPN_CELL_CONFIG] * 4,
)
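# Note on reading the cell config above (illustrative, not part of the
# original file): Block.inputs index into the running list of cell features.
# Indices 0-3 are the cell inputs at levels 3-6, and each block appends its
# output to the end of the list, so the five blocks in _MNASFPN_CELL_CONFIG
# occupy indices 4-8. The last four nodes (levels 3, 4, 5 and 6) become the
# outputs of the cell, matching the feature_levels entry of MNASFPN_DEF.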
def _maybe_pad(feature, use_explicit_padding, kernel_size=3):
return ops.fixed_padding(feature,
kernel_size) if use_explicit_padding else feature
# Wrapper around mobilenet.depth_multiplier
def _apply_multiplier(d, multiplier, min_depth):
p = {'num_outputs': d}
mobilenet.depth_multiplier(
p, multiplier=multiplier, divisible_by=8, min_depth=min_depth)
return p['num_outputs']
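# Rough worked example (illustrative): with multiplier=0.5 and min_depth=16,
# _apply_multiplier(256, 0.5, 16) returns 128; depths are kept divisible by 8
# and any result that would fall below min_depth is clipped up to min_depth.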
def _apply_size_dependent_ordering(input_feature, feature_level, block_level,
expansion_size, use_explicit_padding,
use_native_resize_op):
"""Applies Size-Dependent-Ordering when resizing feature maps.
See https://arxiv.org/abs/1912.01106
Args:
input_feature: input feature map to be resized.
feature_level: the level of the input feature.
block_level: the desired output level for the block.
expansion_size: the expansion size for the block.
use_explicit_padding: Whether to use explicit padding.
use_native_resize_op: Whether to use native resize op.
Returns:
A transformed feature at the desired resolution and expansion size.
"""
padding = 'VALID' if use_explicit_padding else 'SAME'
if feature_level >= block_level: # Perform 1x1 then upsampling.
node = slim.conv2d(
input_feature,
expansion_size, [1, 1],
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='Conv1x1')
if feature_level == block_level:
return node
scale = 2**(feature_level - block_level)
if use_native_resize_op:
input_shape = shape_utils.combined_static_and_dynamic_shape(node)
node = tf.image.resize_nearest_neighbor(
node, [input_shape[1] * scale, input_shape[2] * scale])
else:
node = ops.nearest_neighbor_upsampling(node, scale=scale)
else: # Perform downsampling then 1x1.
stride = 2**(block_level - feature_level)
node = slim.max_pool2d(
_maybe_pad(input_feature, use_explicit_padding), [3, 3],
stride=[stride, stride],
padding=padding,
scope='Downsample')
node = slim.conv2d(
node,
expansion_size, [1, 1],
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='Conv1x1')
return node
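# Illustrative examples of the size-dependent ordering above (not part of the
# original file):
#  * feature_level=5, block_level=3: a 1x1 projection to expansion_size
#    channels is applied first, then the result is upsampled by 2**(5-3) = 4.
#  * feature_level=3, block_level=5: the feature is max-pooled with stride
#    2**(5-3) = 4 first, then projected with the 1x1 conv.
# In both cases the 1x1 projection runs at the lower spatial resolution.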
def _mnasfpn_cell(feature_maps,
feature_levels,
cell_spec,
output_channel=48,
use_explicit_padding=False,
use_native_resize_op=False,
multiplier_func=None):
"""Create a MnasFPN cell.
Args:
feature_maps: input feature maps.
feature_levels: levels of the feature maps.
cell_spec: A list of Block configs.
output_channel: Number of features for the input, output and intermediate
feature maps.
use_explicit_padding: Whether to use explicit padding.
use_native_resize_op: Whether to use native resize op.
multiplier_func: Depth-multiplier function. If None, use identity function.
Returns:
A transformed list of feature maps at the same resolutions as the inputs.
"""
# This is the level where multipliers are realized.
if multiplier_func is None:
multiplier_func = lambda x: x
num_outputs = len(feature_maps)
cell_features = list(feature_maps)
cell_levels = list(feature_levels)
padding = 'VALID' if use_explicit_padding else 'SAME'
for bi, block in enumerate(cell_spec):
with tf.variable_scope('block_{}'.format(bi)):
block_level = block.output_level
intermediate_feature = None
for i, inp in enumerate(block.inputs):
with tf.variable_scope('input_{}'.format(i)):
input_level = cell_levels[inp]
node = _apply_size_dependent_ordering(
cell_features[inp], input_level, block_level,
multiplier_func(block.expansion_size), use_explicit_padding,
use_native_resize_op)
# Add features incrementally to avoid producing AddN, which doesn't
# play well with TfLite.
if intermediate_feature is None:
intermediate_feature = node
else:
intermediate_feature += node
node = tf.nn.relu6(intermediate_feature)
node = slim.separable_conv2d(
_maybe_pad(node, use_explicit_padding, block.kernel_size),
multiplier_func(output_channel),
block.kernel_size,
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='SepConv')
cell_features.append(node)
cell_levels.append(block_level)
# Cell-wide residuals.
out_idx = range(len(cell_features) - num_outputs, len(cell_features))
for in_i, out_i in enumerate(out_idx):
if cell_features[out_i].shape.as_list(
) == cell_features[in_i].shape.as_list():
cell_features[out_i] += cell_features[in_i]
return cell_features[-num_outputs:]
def mnasfpn(feature_maps,
head_def,
output_channel=48,
use_explicit_padding=False,
use_native_resize_op=False,
multiplier_func=None):
"""Create the MnasFPN head given head_def."""
features = feature_maps
for ci, cell_spec in enumerate(head_def['spec']):
with tf.variable_scope('cell_{}'.format(ci)):
features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec,
output_channel, use_explicit_padding,
use_native_resize_op, multiplier_func)
return features
def training_scope(l2_weight_decay=1e-4, is_training=None):
"""Arg scope for training MnasFPN."""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.initializers.he_normal(),
weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
slim.arg_scope(
[slim.separable_conv2d],
weights_initializer=tf.initializers.truncated_normal(
stddev=0.536), # He_normal for 3x3 depthwise kernel.
weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
slim.arg_scope([slim.batch_norm],
is_training=is_training,
epsilon=0.01,
decay=0.99,
center=True,
scale=True) as s:
return s
class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 MnasFPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=6,
additional_layer_depth=48,
head_def=None,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
data_format='channels_last'):
"""SSD MnasFPN feature extractor based on Mobilenet v2 architecture.
See https://arxiv.org/abs/1912.01106
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
fpn_min_level: the highest resolution feature map to use in MnasFPN.
Currently the only valid value is 3.
fpn_max_level: the smallest resolution feature map to construct or use in
        MnasFPN. Currently the only valid value is 6.
additional_layer_depth: additional feature map layer channel depth for
NAS-FPN.
head_def: A dictionary specifying the MnasFPN head architecture. Default
uses MNASFPN_DEF.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
use_native_resize_op: Whether to use native resize op. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
      data_format: The ordering of the dimensions in the inputs. The valid
        values are {'channels_first', 'channels_last'}.
"""
super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
if fpn_min_level != 3 or fpn_max_level != 6:
raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.')
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._fpn_layer_depth = additional_layer_depth
self._head_def = head_def if head_def else MNASFPN_DEF
self._data_format = data_format
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _verify_config(self, inputs):
"""Verify that MnasFPN config and its inputs."""
num_inputs = len(inputs)
assert len(self._head_def['feature_levels']) == num_inputs
base_width = inputs[0].shape.as_list(
)[1] * 2**self._head_def['feature_levels'][0]
for i in range(1, num_inputs):
width = inputs[i].shape.as_list()[1]
level = self._head_def['feature_levels'][i]
expected_width = base_width // 2**level
if width != expected_width:
raise ValueError(
'Resolution of input {} does not match its level {}.'.format(
i, level))
for cell_spec in self._head_def['spec']:
# The last K nodes in a cell are the inputs to the next cell. Assert that
# their feature maps are at the right level.
for i in range(num_inputs):
if cell_spec[-num_inputs +
i].output_level != self._head_def['feature_levels'][i]:
raise ValueError(
'Mismatch between node level {} and desired output level {}.'
.format(cell_spec[-num_inputs + i].output_level,
self._head_def['feature_levels'][i]))
      # Assert that each block only uses preceding blocks.
for bi, block_spec in enumerate(cell_spec):
for inp in block_spec.inputs:
if inp >= bi + num_inputs:
raise ValueError(
'Block {} is trying to access uncreated block {}.'.format(
bi, inp))
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \
slim.arg_scope(
[mobilenet.depth_multiplier], min_depth=self._min_depth):
with slim.arg_scope(
training_scope(l2_weight_decay=4e-5,
is_training=self._is_training)):
_, image_features = mobilenet_v2.mobilenet_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='layer_18',
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
multiplier_func = functools.partial(
_apply_multiplier,
multiplier=self._depth_multiplier,
min_depth=self._min_depth)
with tf.variable_scope('MnasFPN', reuse=self._reuse_weights):
with slim.arg_scope(
training_scope(l2_weight_decay=1e-4, is_training=self._is_training)):
# Create C6 by downsampling C5.
c6 = slim.max_pool2d(
_maybe_pad(image_features['layer_18'], self._use_explicit_padding),
[3, 3],
stride=[2, 2],
padding='VALID' if self._use_explicit_padding else 'SAME',
scope='C6_downsample')
c6 = slim.conv2d(
c6,
multiplier_func(self._fpn_layer_depth),
[1, 1],
activation_fn=tf.identity,
normalizer_fn=slim.batch_norm,
weights_regularizer=None, # this 1x1 has no kernel regularizer.
padding='VALID',
scope='C6_Conv1x1')
image_features['C6'] = tf.identity(c6) # Needed for quantization.
for k in sorted(image_features.keys()):
tf.logging.error('{}: {}'.format(k, image_features[k]))
mnasfpn_inputs = [
image_features['layer_7'], # C3
image_features['layer_14'], # C4
image_features['layer_18'], # C5
image_features['C6'] # C6
]
self._verify_config(mnasfpn_inputs)
feature_maps = mnasfpn(
mnasfpn_inputs,
head_def=self._head_def,
output_channel=self._fpn_layer_depth,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
multiplier_func=multiplier_func)
return feature_maps
| 16,309 | 38.587379 | 80 | py |
models | models-master/research/object_detection/models/bidirectional_feature_pyramid_generators.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate bidirectional feature pyramids based on image features.
Provides bidirectional feature pyramid network (BiFPN) generators that can be
used to build object detection feature extractors, as proposed by Tan et al.
See https://arxiv.org/abs/1911.09070 for more details.
"""
import collections
import functools
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.utils import bifpn_utils
def _create_bifpn_input_config(fpn_min_level,
fpn_max_level,
input_max_level,
level_scales=None):
"""Creates a BiFPN input config for the input levels from a backbone network.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
Returns:
A list of dictionaries for each feature map expected as input to the BiFPN,
where each has entries for the feature map 'name' and 'scale'.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
bifpn_input_params = []
for i in range(fpn_min_level, min(fpn_max_level, input_max_level) + 1):
bifpn_input_params.append({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level]
})
return bifpn_input_params
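# Illustrative example (not part of the original file): with fpn_min_level=3,
# fpn_max_level=7, input_max_level=5 and default level_scales, the returned
# config is
#   [{'name': '0_up_lvl_3', 'scale': 8},
#    {'name': '0_up_lvl_4', 'scale': 16},
#    {'name': '0_up_lvl_5', 'scale': 32}]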
def _get_bifpn_output_node_names(fpn_min_level, fpn_max_level, node_config):
"""Returns a list of BiFPN output node names, given a BiFPN node config.
Args:
fpn_min_level: the minimum pyramid level (highest feature map resolution)
used by the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution)
used by the BiFPN.
node_config: the BiFPN node_config, a list of dictionaries corresponding to
each node in the BiFPN computation graph, where each entry should have an
associated 'name'.
Returns:
A list of strings corresponding to the names of the output BiFPN nodes.
"""
num_output_nodes = fpn_max_level - fpn_min_level + 1
return [node['name'] for node in node_config[-num_output_nodes:]]
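# Illustrative example (not part of the original file): for fpn_min_level=3,
# fpn_max_level=7 and a node config produced by _create_bifpn_node_config
# with bifpn_num_iterations=3, the output node names are
#   ['3_dn_lvl_3', '3_up_lvl_4', '3_up_lvl_5', '3_up_lvl_6', '3_up_lvl_7']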
def _create_bifpn_node_config(bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
bifpn_node_params=None,
level_scales=None,
use_native_resize_op=False):
"""Creates a config specifying a bidirectional feature pyramid network.
Args:
bifpn_num_iterations: the number of top-down bottom-up feature computations
to repeat in the BiFPN.
bifpn_num_filters: the number of filters (channels) for every feature map
used in the BiFPN.
fpn_min_level: the minimum pyramid level (highest feature map resolution) to
use in the BiFPN.
fpn_max_level: the maximum pyramid level (lowest feature map resolution) to
use in the BiFPN.
input_max_level: the maximum pyramid level that will be provided as input to
the BiFPN. Accordingly, the BiFPN will compute additional pyramid levels
from input_max_level, up to the desired fpn_max_level.
bifpn_node_params: If not 'None', a dictionary of additional default BiFPN
node parameters that will be applied to all BiFPN nodes.
level_scales: a list of pyramid level scale factors. If 'None', each level's
scale is set to 2^level by default, which corresponds to each successive
feature map scaling by a factor of 2.
use_native_resize_op: If true, will use
      tf.compat.v1.image.resize_nearest_neighbor for upsampling.
Returns:
A list of dictionaries used to define nodes in the BiFPN computation graph,
as proposed by EfficientDet, Tan et al (https://arxiv.org/abs/1911.09070).
Each node's entry has the corresponding keys:
name: String. The name of this node in the BiFPN. The node name follows
the format '{bifpn_iteration}_{dn|up}_lvl_{pyramid_level}', where 'dn'
or 'up' refers to whether the node is in the top-down or bottom-up
portion of a single BiFPN iteration.
scale: the scale factor for this node, by default 2^level.
inputs: A list of names of nodes which are inputs to this node.
num_channels: The number of channels for this node.
combine_method: String. Name of the method used to combine input
node feature maps, 'fast_attention' by default for nodes which have more
than one input. Otherwise, 'None' for nodes with only one input node.
input_op: A (partial) function which is called to construct the layers
that will be applied to this BiFPN node's inputs. This function is
called with the arguments:
input_op(name, input_scale, input_num_channels, output_scale,
output_num_channels, conv_hyperparams, is_training,
freeze_batchnorm)
post_combine_op: A (partial) function which is called to construct the
layers that will be applied to the result of the combine operation for
this BiFPN node. This function will be called with the arguments:
post_combine_op(name, conv_hyperparams, is_training, freeze_batchnorm)
If 'None', then no layers will be applied after the combine operation
for this node.
"""
if not level_scales:
level_scales = [2**i for i in range(fpn_min_level, fpn_max_level + 1)]
default_node_params = {
'num_channels':
bifpn_num_filters,
'combine_method':
'fast_attention',
'input_op':
functools.partial(
_create_bifpn_resample_block,
downsample_method='max_pooling',
use_native_resize_op=use_native_resize_op),
'post_combine_op':
functools.partial(
bifpn_utils.create_conv_block,
num_filters=bifpn_num_filters,
kernel_size=3,
strides=1,
padding='SAME',
use_separable=True,
apply_batchnorm=True,
apply_activation=True,
conv_bn_act_pattern=False),
}
if bifpn_node_params:
default_node_params.update(bifpn_node_params)
bifpn_node_params = []
# Create additional base pyramid levels not provided as input to the BiFPN.
# Note, combine_method and post_combine_op are set to None for additional
# base pyramid levels because they do not combine multiple input BiFPN nodes.
for i in range(input_max_level + 1, fpn_max_level + 1):
node_params = dict(default_node_params)
node_params.update({
'name': '0_up_lvl_{}'.format(i),
'scale': level_scales[i - fpn_min_level],
'inputs': ['0_up_lvl_{}'.format(i - 1)],
'combine_method': None,
'post_combine_op': None,
})
bifpn_node_params.append(node_params)
for i in range(bifpn_num_iterations):
# The first bottom-up feature pyramid (which includes the input pyramid
# levels from the backbone network and the additional base pyramid levels)
# is indexed at 0. So, the first top-down bottom-up pass of the BiFPN is
# indexed from 1, and repeated for bifpn_num_iterations iterations.
bifpn_i = i + 1
# Create top-down nodes.
for level_i in reversed(range(fpn_min_level, fpn_max_level)):
inputs = []
# BiFPN nodes in the top-down pass receive input from the corresponding
# level from the previous BiFPN iteration's bottom-up pass, except for the
# bottom-most (min) level node, which is computed once in the initial
# bottom-up pass, and is afterwards only computed in each top-down pass.
if level_i > fpn_min_level or bifpn_i == 1:
inputs.append('{}_up_lvl_{}'.format(bifpn_i - 1, level_i))
else:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i - 1, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_dn_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
# Create bottom-up nodes.
for level_i in range(fpn_min_level + 1, fpn_max_level + 1):
# BiFPN nodes in the bottom-up pass receive input from the corresponding
# level from the preceding top-down pass, except for the top (max) level
# which does not have a corresponding node in the top-down pass.
inputs = ['{}_up_lvl_{}'.format(bifpn_i - 1, level_i)]
if level_i < fpn_max_level:
inputs.append('{}_dn_lvl_{}'.format(bifpn_i, level_i))
inputs.append(bifpn_node_params[-1]['name'])
node_params = dict(default_node_params)
node_params.update({
'name': '{}_up_lvl_{}'.format(bifpn_i, level_i),
'scale': level_scales[level_i - fpn_min_level],
'inputs': inputs
})
bifpn_node_params.append(node_params)
return bifpn_node_params
def _create_bifpn_resample_block(name,
input_scale,
input_num_channels,
output_scale,
output_num_channels,
conv_hyperparams,
is_training,
freeze_batchnorm,
downsample_method=None,
use_native_resize_op=False,
maybe_apply_1x1_conv=True,
apply_1x1_pre_sampling=True,
apply_1x1_post_sampling=False):
"""Creates resample block layers for input feature maps to BiFPN nodes.
Args:
name: String. Name used for this block of layers.
input_scale: Scale factor of the input feature map.
input_num_channels: Number of channels in the input feature map.
output_scale: Scale factor of the output feature map.
output_num_channels: Number of channels in the output feature map.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
downsample_method: String. Method to use when downsampling feature maps.
    use_native_resize_op: Bool. Whether to use the native resize op when
upsampling feature maps.
maybe_apply_1x1_conv: Bool. If 'True', a 1x1 convolution will only be
applied if the input_num_channels differs from the output_num_channels.
apply_1x1_pre_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map before the up/down-sampling operation.
apply_1x1_post_sampling: Bool. Whether a 1x1 convolution will be applied to
the input feature map after the up/down-sampling operation.
Returns:
A list of layers which may be applied to the input feature maps in order to
compute feature maps with the specified scale and number of channels.
"""
# By default, 1x1 convolutions are only applied before sampling when the
# number of input and output channels differ.
if maybe_apply_1x1_conv and output_num_channels == input_num_channels:
apply_1x1_pre_sampling = False
apply_1x1_post_sampling = False
apply_bn_for_resampling = True
layers = []
if apply_1x1_pre_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_pre_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
layers.extend(
bifpn_utils.create_resample_feature_map_ops(input_scale, output_scale,
downsample_method,
use_native_resize_op,
conv_hyperparams, is_training,
freeze_batchnorm, name))
if apply_1x1_post_sampling:
layers.extend(
bifpn_utils.create_conv_block(
name=name + '1x1_post_sample/',
num_filters=output_num_channels,
kernel_size=1,
strides=1,
padding='SAME',
use_separable=False,
apply_batchnorm=apply_bn_for_resampling,
apply_activation=False,
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
return layers
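# Illustrative call sketch (hypothetical names and scales; conv_hyperparams is
# assumed to be a hyperparams_builder.KerasLayerHyperparams instance):
#   layers = _create_bifpn_resample_block(
#       name='1_dn_lvl_4/input_0_up_lvl_4/', input_scale=8,
#       input_num_channels=40, output_scale=16, output_num_channels=64,
#       conv_hyperparams=conv_hyperparams, is_training=True,
#       freeze_batchnorm=False, downsample_method='max_pooling')
# With differing channel counts this yields a 1x1 conv (+ batch norm) that
# projects 40 -> 64 channels, followed by a max-pooling downsample by a factor
# of 2; when the channel counts already match, the 1x1 projection is skipped.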
def _create_bifpn_combine_op(num_inputs, name, combine_method):
"""Creates a BiFPN output config, a list of the output BiFPN node names.
Args:
num_inputs: The number of inputs to this combine operation.
name: String. The name of this combine operation.
combine_method: String. The method used to combine input feature maps.
Returns:
A function which may be called with a list of num_inputs feature maps
and which will return a single feature map.
"""
combine_op = None
if num_inputs < 1:
raise ValueError('Expected at least 1 input for BiFPN combine.')
elif num_inputs == 1:
combine_op = lambda x: x[0]
else:
combine_op = bifpn_utils.BiFPNCombineLayer(
combine_method=combine_method, name=name)
return combine_op
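# Illustrative behavior (not part of the original code): a single-input node
# simply passes its feature map through, while multi-input nodes are combined
# by a BiFPNCombineLayer, e.g.
#   combine = _create_bifpn_combine_op(1, 'node/combine', 'fast_attention')
#   combine([feature_map])  # returns feature_map unchanged
#   combine = _create_bifpn_combine_op(3, 'node/combine', 'fast_attention')
#   combine([map_a, map_b, map_c])  # fast-attention weighted combination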
class KerasBiFpnFeatureMaps(tf.keras.Model):
"""Generates Keras based BiFPN feature maps from an input feature map pyramid.
A Keras model that generates multi-scale feature maps for detection by
iteratively computing top-down and bottom-up feature pyramids, as in the
EfficientDet paper by Tan et al, see arxiv.org/abs/1911.09070 for details.
"""
def __init__(self,
bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
is_training,
conv_hyperparams,
freeze_batchnorm,
bifpn_node_params=None,
use_native_resize_op=False,
name=None):
"""Constructor.
Args:
bifpn_num_iterations: The number of top-down bottom-up iterations.
bifpn_num_filters: The number of filters (channels) to be used for all
feature maps in this BiFPN.
fpn_min_level: The minimum pyramid level (highest feature map resolution)
to use in the BiFPN.
fpn_max_level: The maximum pyramid level (lowest feature map resolution)
to use in the BiFPN.
input_max_level: The maximum pyramid level that will be provided as input
to the BiFPN. Accordingly, the BiFPN will compute any additional pyramid
levels from input_max_level up to the desired fpn_max_level, with each
      successive level downsampling by a scale factor of 2 by default.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
bifpn_node_params: An optional dictionary that may be used to specify
default parameters for BiFPN nodes, without the need to provide a custom
bifpn_node_config. For example, if '{ combine_method: 'sum' }', then all
BiFPN nodes will combine input feature maps by summation, rather than
by the default fast attention method.
use_native_resize_op: If True, will use
      tf.compat.v1.image.resize_nearest_neighbor for upsampling.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasBiFpnFeatureMaps, self).__init__(name=name)
bifpn_node_config = _create_bifpn_node_config(
bifpn_num_iterations,
bifpn_num_filters,
fpn_min_level,
fpn_max_level,
input_max_level,
bifpn_node_params,
use_native_resize_op=use_native_resize_op)
bifpn_input_config = _create_bifpn_input_config(fpn_min_level,
fpn_max_level,
input_max_level)
bifpn_output_node_names = _get_bifpn_output_node_names(
fpn_min_level, fpn_max_level, bifpn_node_config)
self.bifpn_node_config = bifpn_node_config
self.bifpn_output_node_names = bifpn_output_node_names
self.node_input_blocks = []
self.node_combine_op = []
self.node_post_combine_block = []
all_node_params = bifpn_input_config
all_node_names = [node['name'] for node in all_node_params]
for node_config in bifpn_node_config:
# Maybe transform and/or resample input feature maps.
input_blocks = []
for input_name in node_config['inputs']:
if input_name not in all_node_names:
raise ValueError(
            'Input feature map ({}) does not exist.'.format(input_name))
input_index = all_node_names.index(input_name)
input_params = all_node_params[input_index]
input_block = node_config['input_op'](
name='{}/input_{}/'.format(node_config['name'], input_name),
input_scale=input_params['scale'],
input_num_channels=input_params.get('num_channels', None),
output_scale=node_config['scale'],
output_num_channels=node_config['num_channels'],
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm)
input_blocks.append((input_index, input_block))
# Combine input feature maps.
combine_op = _create_bifpn_combine_op(
num_inputs=len(input_blocks),
name=(node_config['name'] + '/combine'),
combine_method=node_config['combine_method'])
# Post-combine layers.
post_combine_block = []
if node_config['post_combine_op']:
post_combine_block.extend(node_config['post_combine_op'](
name=node_config['name'] + '/post_combine/',
conv_hyperparams=conv_hyperparams,
is_training=is_training,
freeze_batchnorm=freeze_batchnorm))
self.node_input_blocks.append(input_blocks)
self.node_combine_op.append(combine_op)
self.node_post_combine_block.append(post_combine_block)
all_node_params.append(node_config)
all_node_names.append(node_config['name'])
def call(self, feature_pyramid):
"""Compute BiFPN feature maps from input feature pyramid.
Executed when calling the `.__call__` method on input.
Args:
feature_pyramid: list of tuples of (tensor_name, image_feature_tensor).
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = [el[1] for el in feature_pyramid]
output_feature_maps = [None for node in self.bifpn_output_node_names]
for index, node in enumerate(self.bifpn_node_config):
node_scope = 'node_{:02d}'.format(index)
with tf.name_scope(node_scope):
# Apply layer blocks to this node's input feature maps.
input_block_results = []
for input_index, input_block in self.node_input_blocks[index]:
block_result = feature_maps[input_index]
for layer in input_block:
block_result = layer(block_result)
input_block_results.append(block_result)
# Combine the resulting feature maps.
node_result = self.node_combine_op[index](input_block_results)
# Apply post-combine layer block if applicable.
for layer in self.node_post_combine_block[index]:
node_result = layer(node_result)
feature_maps.append(node_result)
if node['name'] in self.bifpn_output_node_names:
index = self.bifpn_output_node_names.index(node['name'])
output_feature_maps[index] = node_result
return collections.OrderedDict(
zip(self.bifpn_output_node_names, output_feature_maps))
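# Illustrative usage sketch (assumes `conv_hyperparams` was built with
# hyperparams_builder.KerasLayerHyperparams from a hyperparams_pb2.Hyperparams
# config; level names and shapes below are arbitrary):
#   bifpn = KerasBiFpnFeatureMaps(
#       bifpn_num_iterations=3, bifpn_num_filters=64, fpn_min_level=3,
#       fpn_max_level=7, input_max_level=5, is_training=True,
#       conv_hyperparams=conv_hyperparams, freeze_batchnorm=False)
#   feature_pyramid = [('level_3', tf.random.uniform([2, 64, 64, 40])),
#                      ('level_4', tf.random.uniform([2, 32, 32, 112])),
#                      ('level_5', tf.random.uniform([2, 16, 16, 320]))]
#   feature_maps = bifpn(feature_pyramid)
#   # -> OrderedDict with one [2, height_i, width_i, 64] tensor per level in
#   #    [fpn_min_level, fpn_max_level].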
| 22,101 | 43.115768 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_mobilenet_v1_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet v1 Faster R-CNN implementation."""
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import shape_utils
from nets import mobilenet_v1
def _get_mobilenet_conv_no_last_stride_defs(conv_depth_ratio_in_percentage):
if conv_depth_ratio_in_percentage not in [25, 50, 75, 100]:
raise ValueError(
'Only the following ratio percentages are supported: 25, 50, 75, 100')
conv_depth_ratio_in_percentage = float(conv_depth_ratio_in_percentage) / 100.0
channels = np.array([
32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024
], dtype=np.float32)
channels = (channels * conv_depth_ratio_in_percentage).astype(np.int32)
return [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=channels[0]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[1]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[2]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[3]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[4]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[5]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=channels[6]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[7]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[8]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[9]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[10]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[11]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[12]),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=channels[13])
]
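# Illustrative example (not part of the original code): with
# conv_depth_ratio_in_percentage=50 the depths above become
#   [16, 32, 64, 64, 128, 128, 256, 256, 256, 256, 256, 256, 512, 512]
# and, because the final depthwise separable conv keeps stride 1, the overall
# feature stride stays at 16 instead of 32.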
class FasterRCNNMobilenetV1FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Mobilenet V1 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16,
skip_last_stride=False,
conv_depth_ratio_in_percentage=100):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
skip_last_stride: Skip the last stride if True.
conv_depth_ratio_in_percentage: Conv depth ratio in percentage. Only
applied if skip_last_stride is True.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
self._skip_last_stride = skip_last_stride
self._conv_depth_ratio_in_percentage = conv_depth_ratio_in_percentage
super(FasterRCNNMobilenetV1FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Mobilenet V1 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
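  # Illustrative arithmetic for the mapping above: a pixel value of 0 maps to
  # (2.0 / 255.0) * 0 - 1.0 == -1.0 and a pixel value of 255 maps to
  # (2.0 / 255.0) * 255 - 1.0 == 1.0.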
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
preprocessed_inputs = shape_utils.check_min_image_dim(
min_dim=33, image_tensor=preprocessed_inputs)
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=self._train_batch_norm,
weight_decay=self._weight_decay)):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
params = {}
if self._skip_last_stride:
params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs(
conv_depth_ratio_in_percentage=self.
_conv_depth_ratio_in_percentage)
_, activations = mobilenet_v1.mobilenet_v1_base(
preprocessed_inputs,
final_endpoint='Conv2d_11_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope,
**params)
return activations['Conv2d_11_pointwise'], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
conv_depth = 1024
if self._skip_last_stride:
conv_depth_ratio = float(self._conv_depth_ratio_in_percentage) / 100.0
conv_depth = int(float(conv_depth) * conv_depth_ratio)
depth = lambda d: max(int(d * 1.0), 16)
with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights):
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=self._train_batch_norm,
weight_decay=self._weight_decay)):
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding='SAME'):
net = slim.separable_conv2d(
net,
depth(conv_depth), [3, 3],
depth_multiplier=1,
stride=2,
scope='Conv2d_12_pointwise')
return slim.separable_conv2d(
net,
depth(conv_depth), [3, 3],
depth_multiplier=1,
stride=1,
scope='Conv2d_13_pointwise')
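  # Illustrative example (not part of the original code): with
  # skip_last_stride=True and conv_depth_ratio_in_percentage=75 the two
  # separable convs above use a depth of int(1024 * 0.75) = 768 instead of
  # 1024.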
| 7,955 | 40.010309 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_ppn_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_ppn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1PpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (ssd_mobilenet_v1_ppn_feature_extractor.
SSDMobileNetV1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_320(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512),
(2, 5, 5, 512), (2, 3, 3, 512),
(2, 2, 2, 512), (2, 1, 1, 512)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_300(self):
image_height = 300
image_width = 300
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 512),
(2, 5, 5, 512), (2, 3, 3, 512),
(2, 2, 2, 512), (2, 1, 1, 512)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_640(self):
image_height = 640
image_width = 640
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 512), (2, 20, 20, 512),
(2, 10, 10, 512), (2, 5, 5, 512),
(2, 3, 3, 512), (2, 2, 2, 512)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512),
(2, 5, 5, 512), (2, 3, 3, 512),
(2, 2, 2, 512), (2, 1, 1, 512)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 512),
(2, 5, 5, 512), (2, 3, 3, 512),
(2, 2, 2, 512)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32),
(2, 4, 4, 32), (2, 2, 2, 32),
(2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
def test_has_fused_batchnorm(self):
image_height = 320
image_width = 320
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
if __name__ == '__main__':
tf.test.main()
| 8,456 | 44.224599 | 80 | py |
models | models-master/research/object_detection/models/center_net_mobilenet_v2_feature_extractor.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MobileNet V2[1] feature extractor for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1801.04381
[2]: https://arxiv.org/abs/1904.07850
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import center_net_meta_arch
from object_detection.models.keras_models import mobilenet_v2 as mobilenetv2
class CenterNetMobileNetV2FeatureExtractor(
center_net_meta_arch.CenterNetFeatureExtractor):
"""The MobileNet V2 feature extractor for CenterNet."""
def __init__(self,
mobilenet_v2_net,
channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.),
bgr_ordering=False):
"""Intializes the feature extractor.
Args:
mobilenet_v2_net: The underlying mobilenet_v2 network to use.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetMobileNetV2FeatureExtractor, self).__init__(
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
self._network = mobilenet_v2_net
output = self._network(self._network.input)
# MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280, which
# leads to a stride of 32. We perform upsampling to get it to a target
# stride of 4.
for num_filters in [256, 128, 64]:
# 1. We use a simple convolution instead of a deformable convolution
conv = tf.keras.layers.Conv2D(
filters=num_filters, kernel_size=1, strides=1, padding='same')
output = conv(output)
output = tf.keras.layers.BatchNormalization()(output)
output = tf.keras.layers.ReLU()(output)
# 2. We use the default initialization for the convolution layers
# instead of initializing it to do bilinear upsampling.
conv_transpose = tf.keras.layers.Conv2DTranspose(
filters=num_filters, kernel_size=3, strides=2, padding='same')
output = conv_transpose(output)
output = tf.keras.layers.BatchNormalization()(output)
output = tf.keras.layers.ReLU()(output)
self._network = tf.keras.models.Model(
inputs=self._network.input, outputs=output)
def preprocess(self, resized_inputs):
resized_inputs = super(CenterNetMobileNetV2FeatureExtractor,
self).preprocess(resized_inputs)
return tf.keras.applications.mobilenet_v2.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._network.load_weights(path)
def call(self, inputs):
return [self._network(inputs)]
@property
def out_stride(self):
"""The stride in the output image of the network."""
return 4
@property
def num_feature_outputs(self):
"""The number of feature outputs returned by the feature extractor."""
return 1
@property
def classification_backbone(self):
return self._network
def mobilenet_v2(channel_means, channel_stds, bgr_ordering,
depth_multiplier=1.0, **kwargs):
"""The MobileNetV2 backbone for CenterNet."""
del kwargs
# We set 'is_training' to True for now.
network = mobilenetv2.mobilenet_v2(
batchnorm_training=True,
alpha=depth_multiplier,
include_top=False,
weights='imagenet' if depth_multiplier == 1.0 else None)
return CenterNetMobileNetV2FeatureExtractor(
network,
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
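# Illustrative usage sketch (assumes the object_detection package is installed
# and, for depth_multiplier=1.0, that pretrained ImageNet weights can be
# downloaded):
#   extractor = mobilenet_v2(
#       channel_means=(0., 0., 0.), channel_stds=(1., 1., 1.),
#       bgr_ordering=False, depth_multiplier=1.0)
#   images = tf.random.uniform([1, 512, 512, 3], maxval=255.0)
#   features = extractor(extractor.preprocess(images))
#   # -> single-element list; with out_stride == 4 the feature map has
#   #    spatial size 128x128 (512 / 4).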
| 4,444 | 36.041667 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd resnet v1 feature extractors."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor_testbase
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet50V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet50v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return ssd_resnet_v1_ppn_feature_extractor.SSDResnet50V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def _scope_name(self):
return 'resnet_v1_50'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet101V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet101v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_ppn_feature_extractor.SSDResnet101V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _scope_name(self):
return 'resnet_v1_101'
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SSDResnet152V1PpnFeatureExtractorTest(
ssd_resnet_v1_ppn_feature_extractor_testbase.
SSDResnetPpnFeatureExtractorTestBase):
"""SSDResnet152v1 feature extractor test."""
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False):
min_depth = 32
is_training = True
return (
ssd_resnet_v1_ppn_feature_extractor.SSDResnet152V1PpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
def _scope_name(self):
return 'resnet_v1_152'
if __name__ == '__main__':
tf.test.main()
| 3,327 | 34.404255 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception Resnet v2 Faster R-CNN implementation.
See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import variables_helper
from nets import inception_resnet_v2
class FasterRCNNInceptionResnetV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN with Inception Resnet v2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN with Inception Resnet v2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Extracts features using the first half of the Inception Resnet v2 network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
weight_decay=self._weight_decay)):
# Forces is_training to False to disable batch norm update.
with slim.arg_scope([slim.batch_norm],
is_training=self._train_batch_norm):
with tf.variable_scope('InceptionResnetV2',
reuse=self._reuse_weights) as scope:
return inception_resnet_v2.inception_resnet_v2_base(
preprocessed_inputs, final_endpoint='PreAuxLogits',
scope=scope, output_stride=self._first_stage_features_stride,
align_feature_maps=True)
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
weight_decay=self._weight_decay)):
# Forces is_training to False to disable batch norm update.
with slim.arg_scope([slim.batch_norm],
is_training=self._train_batch_norm):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(proposal_feature_maps,
256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(
tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(
proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(
tower_conv1, 288, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(
proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(
tower_conv2_1, 320, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(
proposal_feature_maps, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(
[tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20)
net = inception_resnet_v2.block8(net, activation_fn=None)
proposal_classifier_features = slim.conv2d(
net, 1536, 1, scope='Conv2d_7b_1x1')
return proposal_classifier_features
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
InceptionResnetV2 checkpoints.
TODO(jonathanhuang,rathodv): revisit whether it's possible to force the
`Repeat` namescope as created in `_extract_box_classifier_features` to
start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can
be used.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
if variable.op.name.startswith(
first_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
first_stage_feature_extractor_scope + '/', '')
variables_to_restore[var_name] = variable
if variable.op.name.startswith(
second_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
second_stage_feature_extractor_scope
+ '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2')
var_name = var_name.replace(
second_stage_feature_extractor_scope + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
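  # Illustrative sketch of the remapping above (hypothetical variable name,
  # assuming the default 'SecondStageFeatureExtractor' scope):
  #   name = ('SecondStageFeatureExtractor/InceptionResnetV2/'
  #           'Repeat/block8_1/Conv2d_1x1/weights')
  #   name = name.replace(
  #       'SecondStageFeatureExtractor/InceptionResnetV2/Repeat',
  #       'InceptionResnetV2/Repeat_2')
  #   # -> 'InceptionResnetV2/Repeat_2/block8_1/Conv2d_1x1/weights', i.e. the
  #   #    corresponding variable in an ImageNet classification checkpoint.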
| 9,286 | 42.600939 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf2_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class SsdMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: unused argument.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_keras_feature_extractor.
SSDMobileNetV2KerasFeatureExtractor(
is_training=False,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
name='MobilenetV2'))
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True, use_keras=True)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=True)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=True)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=True)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=True)
self.assertEqual(len(variables), 292)
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4,
use_keras=True)
if __name__ == '__main__':
tf.test.main()
| 7,894 | 39.906736 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple)
self.assertEqual(len(variables), 292)
def test_has_fused_batchnorm(self):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False, num_layers=4)
if __name__ == '__main__':
tf.test.main()
| 7,991 | 39.568528 | 80 | py |
models | models-master/research/object_detection/models/faster_rcnn_nas_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NASNet Faster R-CNN implementation.
Learning Transferable Architectures for Scalable Image Recognition
Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le
https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.utils import variables_helper
# pylint: disable=g-import-not-at-top
try:
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
except: # pylint: disable=bare-except
pass
# pylint: enable=g-import-not-at-top
arg_scope = slim.arg_scope
def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
"""Defines the default arg scope for the NASNet-A Large for object detection.
This provides a small edit to switch batch norm training on and off.
Args:
is_batch_norm_training: Boolean indicating whether to train with batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
imagenet_scope = nasnet.nasnet_large_arg_scope()
with arg_scope(imagenet_scope):
with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc:
return sc
# Note: This is largely a copy of _build_nasnet_base inside nasnet.py but
# with special edits to remove instantiation of the stem and the special
# ability to receive as input a pair of hidden states.
def _build_nasnet_base(hidden_previous,
hidden,
normal_cell,
reduction_cell,
hparams,
true_cell_num,
start_cell_num):
"""Constructs a NASNet image model."""
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
# Note: The None is prepended to match the behavior of _imagenet_stem()
cell_outputs = [None, hidden_previous, hidden]
net = hidden
# NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead
# start at 2.0 because 1 reduction cell has been created which would
# update the filter_scaling to 2.0.
filter_scaling = 2.0
# Run the cells
for cell_num in range(start_cell_num, hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num)
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
true_cell_num += 1
cell_outputs.append(net)
# Final nonlinearity.
# Note that we have dropped the final pooling, dropout and softmax layers
# from the default nasnet version.
with tf.variable_scope('final_layer'):
net = tf.nn.relu(net)
return net
# TODO(shlens): Only fixed_shape_resizer is currently supported for NASNet
# featurization. The reason for this is that nasnet.py only supports
# inputs with fully known shapes. We need to update nasnet.py to handle
# shapes not known at compile time.
class FasterRCNNNASFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN with NASNet-A feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNNASFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN with NAS preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
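    # Worked example: pixel value 0.0 maps to -1.0, 127.5 maps to 0.0 and
    # 255.0 maps to +1.0, i.e. the expression linearly rescales [0, 255] to
    # [-1, 1].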
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Extracts features using the first half of the NASNet network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
end_points: A dictionary mapping feature extractor tensor names to tensors
Raises:
ValueError: If the created network is missing the required activation.
"""
del scope
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
with slim.arg_scope(nasnet_large_arg_scope_for_detection(
is_batch_norm_training=self._train_batch_norm)):
with arg_scope([slim.conv2d,
slim.batch_norm,
slim.separable_conv2d],
reuse=self._reuse_weights):
_, end_points = nasnet.build_nasnet_large(
preprocessed_inputs, num_classes=None,
is_training=self._is_training,
final_endpoint='Cell_11')
# Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016.
rpn_feature_map = tf.concat([end_points['Cell_10'],
end_points['Cell_11']], 3)
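    # Concatenating the two 2016-channel endpoints therefore yields a
    # 4032-channel RPN feature map.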
# nasnet.py does not maintain the batch size in the first dimension.
    # This workaround lets us restore the batch dimension for the reshape
    # below.
batch = preprocessed_inputs.get_shape().as_list()[0]
shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
rpn_feature_map_shape = [batch] + shape_without_batch
rpn_feature_map.set_shape(rpn_feature_map_shape)
return rpn_feature_map, end_points
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
This function reconstructs the "second half" of the NASNet-A
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
del scope
# Note that we always feed into 2 layers of equal depth
# where the first N channels corresponds to previous hidden layer
# and the second N channels correspond to the final hidden layer.
hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3)
# Note that what follows is largely a copy of build_nasnet_large() within
# nasnet.py. We are copying to minimize code pollution in slim.
# TODO(shlens,skornblith): Determine the appropriate drop path schedule.
# For now the schedule is the default (1.0->0.7 over 250,000 train steps).
hparams = nasnet.large_imagenet_config()
if not self._is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
# Calculate the total number of cells in the network
# -- Add 2 for the reduction cells.
total_num_cells = hparams.num_cells + 2
# -- And add 2 for the stem cells for ImageNet training.
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps)
with arg_scope([slim.dropout, nasnet_utils.drop_path],
is_training=self._is_training):
with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
          # This corresponds to the cell number just past 'Cell_11' used by
          # _extract_proposal_features().
start_cell_num = 12
# Note that this number equals:
# start_cell_num + 2 stem cells + 1 reduction cell
true_cell_num = 15
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
net = _build_nasnet_base(hidden_previous,
hidden,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
hparams=hparams,
true_cell_num=true_cell_num,
start_cell_num=start_cell_num)
proposal_classifier_features = net
return proposal_classifier_features
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
NASNet-A checkpoints.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
# Note that the NAS checkpoint only contains the moving average version of
# the Variables so we need to generate an appropriate dictionary mapping.
variables_to_restore = {}
for variable in variables_helper.get_global_variables_safely():
if variable.op.name.startswith(
first_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
first_stage_feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
if variable.op.name.startswith(
second_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
second_stage_feature_extractor_scope + '/', '')
var_name += '/ExponentialMovingAverage'
variables_to_restore[var_name] = variable
return variables_to_restore
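# Example of the mapping built above, assuming a hypothetical model variable
# 'FirstStageFeatureExtractor/cell_0/1x1/weights' and
# first_stage_feature_extractor_scope='FirstStageFeatureExtractor': the
# returned dict maps 'cell_0/1x1/weights/ExponentialMovingAverage' (the name
# expected in the NAS classification checkpoint) to that model variable.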
| 13,088 | 37.955357 | 80 | py |
models | models-master/research/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing hourglass feature extractor for CenterNet."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_hourglass_feature_extractor as hourglass
from object_detection.models.keras_models import hourglass_network
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetHourglassFeatureExtractorTest(test_case.TestCase):
def test_center_net_hourglass_feature_extractor(self):
net = hourglass_network.HourglassNetwork(
num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6],
input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 12, 14],
num_hourglasses=2)
model = hourglass.CenterNetHourglassFeatureExtractor(net)
def graph_fn():
return model(tf.zeros((2, 64, 64, 3), dtype=np.float32))
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs[0].shape, (2, 16, 16, 6))
self.assertEqual(outputs[1].shape, (2, 16, 16, 6))
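    # Both outputs (one per hourglass, num_hourglasses=2) have spatial stride
    # 4 (64 -> 16) and a channel depth of 6, matching channel_dims_per_stage[0]
    # in this toy configuration.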
if __name__ == '__main__':
tf.test.main()
| 1,801 | 38.173913 | 87 | py |
models | models-master/research/object_detection/models/ssd_inception_v2_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.models.ssd_inception_v2_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_inception_v2_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdInceptionV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
is_training=True):
"""Constructs a SsdInceptionV2FeatureExtractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
is_training: whether the network is in training mode.
Returns:
an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor.
"""
min_depth = 32
return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=True)
def test_extract_features_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 128), (2, 10, 10, 128),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1024),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'InceptionV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
def test_extract_features_with_fewer_layers(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1024),
(2, 2, 2, 512), (2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, num_layers=4)
if __name__ == '__main__':
tf.test.main()
| 6,657 | 40.354037 | 80 | py |
models | models-master/research/object_detection/models/ssd_feature_extractor_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class SSDFeatureExtractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
class SsdFeatureExtractorTestBase(test_case.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: false
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def conv_hyperparams_fn(self):
with slim.arg_scope([]) as sc:
return sc
@abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor or an
ssd_meta_arch.SSDKerasFeatureExtractor object.
"""
pass
def _create_features(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
use_keras=False):
kwargs = {}
if use_explicit_padding:
kwargs.update({'use_explicit_padding': use_explicit_padding})
if use_depthwise:
kwargs.update({'use_depthwise': use_depthwise})
if num_layers != 6:
kwargs.update({'num_layers': num_layers})
if use_keras:
kwargs.update({'use_keras': use_keras})
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
**kwargs)
return feature_extractor
def _extract_features(self,
image_tensor,
feature_extractor,
use_keras=False):
if use_keras:
feature_maps = feature_extractor(image_tensor)
else:
feature_maps = feature_extractor.extract_features(image_tensor)
return feature_maps
def check_extract_features_returns_correct_shape(self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False,
num_channels=3):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_tensor):
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
num_channels).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_raises_error_with_invalid_image_size(
self,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
batch = 4
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
shape = tf.stack([batch, height, width, 3])
preprocessed_inputs = tf.random.uniform(shape)
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn():
feature_maps = self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return feature_maps
if self.is_tf2():
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [], graph=g)
else:
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [], graph=g)
def check_feature_extractor_variables_under_scope(self,
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=False,
use_depthwise=False):
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
for variable in variables:
self.assertTrue(variable.name.startswith(scope_name))
def get_feature_extractor_variables(self,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
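# Illustrative sketch of how this base class is used (class and module names
# are hypothetical): a concrete test implements _create_feature_extractor()
# and then calls the check_* helpers, e.g.
#
#   class MyFeatureExtractorTest(SsdFeatureExtractorTestBase):
#
#     def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
#                                   **kwargs):
#       return my_extractor.MyFeatureExtractor(
#           True, depth_multiplier, 32, pad_to_multiple,
#           self.conv_hyperparams_fn, **kwargs)
#
#     def test_shapes(self):
#       self.check_extract_features_returns_correct_shape(
#           2, 256, 256, 1.0, 1, [(2, 32, 32, 256)])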
| 9,745 | 35.916667 | 80 | py |
models | models-master/research/object_detection/models/__init__.py | 0 | 0 | 0 | py |
|
models | models-master/research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
override_base_feature_extractor_hyperparams=False):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_depthwise: Whether to use depthwise convolutions. Default is False.
num_layers: Number of SSD layers.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
num_layers=num_layers,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''][:self._num_layers],
'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return list(feature_maps.values())
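# Illustrative usage sketch (`conv_hyperparams_fn` and `images` are
# hypothetical, and the min_depth/pad_to_multiple values are examples only):
#
#   feature_extractor = SSDMobileNetV1FeatureExtractor(
#       is_training=True,
#       depth_multiplier=1.0,
#       min_depth=16,
#       pad_to_multiple=1,
#       conv_hyperparams_fn=conv_hyperparams_fn)
#   preprocessed = feature_extractor.preprocess(images)  # images in [0, 255]
#   feature_maps = feature_extractor.extract_features(preprocessed)
#
# `feature_maps` is a list of six tensors: the first two are taken directly
# from MobileNet V1 ('Conv2d_11_pointwise' and 'Conv2d_13_pointwise') and the
# remaining four are extra SSD layers with target depths 512, 256, 256 and 128
# (before the depth multiplier and min_depth are applied).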
| 5,617 | 39.710145 | 80 | py |
models | models-master/research/object_detection/models/ssd_resnet_v1_ppn_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD feature extractors based on Resnet v1 and PPN architectures."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
class _SSDResnetPpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD feature extractor based on resnet architecture and PPN."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
resnet_base_fn,
resnet_scope_name,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
base_feature_map_depth=1024,
num_layers=6,
override_base_feature_extractor_hyperparams=False,
use_bounded_activations=False):
"""Resnet based PPN Feature Extractor for SSD Models.
See go/pooling-pyramid for more details about PPN.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
resnet_base_fn: base resnet network to use.
resnet_scope_name: scope name to construct resnet
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
use_bounded_activations: Whether or not to use bounded activations for
resnet v1 bottleneck residual unit. Bounded activations better lend
themselves to quantized inference.
"""
super(_SSDResnetPpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
self._resnet_base_fn = resnet_base_fn
self._resnet_scope_name = resnet_scope_name
self._base_feature_map_depth = base_feature_map_depth
self._num_layers = num_layers
self._use_bounded_activations = use_bounded_activations
def _filter_features(self, image_features):
# TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
# of munging the scope here.
filtered_image_features = dict({})
for key, feature in image_features.items():
feature_name = key.split('/')[-1]
if feature_name in ['block2', 'block3', 'block4']:
filtered_image_features[feature_name] = feature
return filtered_image_features
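    # For example, an endpoint keyed 'resnet_v1_50/block3' (hypothetical key)
    # is re-keyed to plain 'block3', while endpoints whose last path component
    # is not one of block2/block3/block4 are dropped.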
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md.
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: depth multiplier is not supported.
"""
if self._depth_multiplier != 1.0:
raise ValueError('Depth multiplier not supported.')
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
with tf.variable_scope(
self._resnet_scope_name, reuse=self._reuse_weights) as scope:
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams else
context_manager.IdentityContextManager()):
with slim.arg_scope(
[resnet_v1.bottleneck],
use_bounded_activations=self._use_bounded_activations):
_, activations = self._resnet_base_fn(
inputs=ops.pad_to_multiple(preprocessed_inputs,
self._pad_to_multiple),
num_classes=None,
is_training=None,
global_pool=False,
output_stride=None,
store_non_strided_activations=True,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=self._base_feature_map_depth,
num_layers=self._num_layers,
image_features={
'image_features': self._filter_features(activations)['block3']
})
return list(feature_maps.values())
class SSDResnet50V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet50 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet50 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet50V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_50, 'resnet_v1_50',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
class SSDResnet101V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet101 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet101 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet101V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_101, 'resnet_v1_101',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
class SSDResnet152V1PpnFeatureExtractor(_SSDResnetPpnFeatureExtractor):
"""PPN Resnet152 v1 Feature Extractor."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""Resnet152 v1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDResnet152V1PpnFeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, resnet_v1.resnet_v1_152, 'resnet_v1_152',
reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
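# Illustrative usage sketch (`conv_hyperparams_fn` and `images` are
# hypothetical): the three public classes in this module differ only in which
# resnet_v1 base function and scope name they pass to
# _SSDResnetPpnFeatureExtractor, e.g.
#
#   feature_extractor = SSDResnet50V1PpnFeatureExtractor(
#       is_training=True,
#       depth_multiplier=1.0,
#       min_depth=32,
#       pad_to_multiple=1,
#       conv_hyperparams_fn=conv_hyperparams_fn)
#   feature_maps = feature_extractor.extract_features(
#       feature_extractor.preprocess(images))
#
# Note that extract_features raises a ValueError unless depth_multiplier is
# exactly 1.0.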
| 12,410 | 42.700704 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_fpn_feature_extractor.
Using the parameterized test decorator, this test covers both the Slim-based
and Keras-based MobileNet V2 FPN feature extractors for SSD.
"""
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@parameterized.parameters(
{
'use_depthwise': False
},
{
'use_depthwise': True
},
)
class SsdMobilenetV2FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
is_training=True,
use_explicit_padding=False,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v2_fpn_feature_extractor
.SSDMobileNetV2FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=use_depthwise,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
  def test_extract_features_returns_correct_shapes_320(self, use_depthwise):
use_keras = False
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_4_channels(self,
use_depthwise):
use_keras = False
image_height = 320
image_width = 320
num_channels = 4
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise,
num_channels=num_channels)
def test_extract_features_with_dynamic_image_shape(self,
use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_depthwise):
use_keras = False
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False,
use_keras=use_keras,
use_depthwise=use_depthwise)
self.check_extract_features_returns_correct_shape(
2,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_depthwise):
use_keras = False
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_preprocess_returns_correct_value_range(self,
use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self, use_depthwise):
use_keras = False
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=use_keras,
use_depthwise=use_depthwise)
def test_fused_batchnorm(self, use_depthwise):
use_keras = False
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(
any('FusedBatchNorm' in op.type
for op in tf.get_default_graph().get_operations()))
def test_variable_count(self, use_depthwise):
use_keras = False
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
expected_variables_len = 274
if use_depthwise:
expected_variables_len = 278
self.assertEqual(len(variables), expected_variables_len)
def test_get_expected_feature_map_variable_names(self,
use_depthwise):
use_keras = False
depth_multiplier = 1.0
pad_to_multiple = 1
slim_expected_feature_maps_variables = set([
# Slim Mobilenet V2 feature maps
'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
'MobilenetV2/Conv_1/weights',
# FPN layers
'MobilenetV2/fpn/bottom_up_Conv2d_20/weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/weights',
'MobilenetV2/fpn/smoothing_1/weights',
'MobilenetV2/fpn/smoothing_2/weights',
'MobilenetV2/fpn/projection_1/weights',
'MobilenetV2/fpn/projection_2/weights',
'MobilenetV2/fpn/projection_3/weights',
])
slim_expected_feature_maps_variables_with_depthwise = set([
# Slim Mobilenet V2 feature maps
'MobilenetV2/expanded_conv_4/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_7/depthwise/depthwise_weights',
'MobilenetV2/expanded_conv_14/depthwise/depthwise_weights',
'MobilenetV2/Conv_1/weights',
# FPN layers
'MobilenetV2/fpn/bottom_up_Conv2d_20/pointwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_20/depthwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/pointwise_weights',
'MobilenetV2/fpn/bottom_up_Conv2d_21/depthwise_weights',
'MobilenetV2/fpn/smoothing_1/depthwise_weights',
'MobilenetV2/fpn/smoothing_1/pointwise_weights',
'MobilenetV2/fpn/smoothing_2/depthwise_weights',
'MobilenetV2/fpn/smoothing_2/pointwise_weights',
'MobilenetV2/fpn/projection_1/weights',
'MobilenetV2/fpn/projection_2/weights',
'MobilenetV2/fpn/projection_3/weights',
])
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
_ = feature_extractor.extract_features(preprocessed_inputs)
expected_feature_maps_variables = slim_expected_feature_maps_variables
if use_depthwise:
expected_feature_maps_variables = (
slim_expected_feature_maps_variables_with_depthwise)
actual_variable_set = set([
var.op.name for var in g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
])
variable_intersection = expected_feature_maps_variables.intersection(
actual_variable_set)
self.assertSetEqual(expected_feature_maps_variables,
variable_intersection)
if __name__ == '__main__':
tf.test.main()
| 14,559 | 34.773956 | 80 | py |
models | models-master/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedded_ssd_mobilenet_v1_feature_extractor."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import embedded_ssd_mobilenet_v1_feature_extractor
from object_detection.models import ssd_feature_extractor_test
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class EmbeddedSSDMobileNetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
return (embedded_ssd_mobilenet_v1_feature_extractor.
EmbeddedSSDMobileNetV1FeatureExtractor(
is_training, depth_multiplier, min_depth, pad_to_multiple,
self.conv_hyperparams_fn,
override_base_feature_extractor_hyperparams=True))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 32), (2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple_of_1(
self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 16, 16, 512), (2, 8, 8, 1024),
(2, 4, 4, 512), (2, 2, 2, 256),
(2, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape)
def test_extract_features_raises_error_with_pad_to_multiple_not_1(self):
depth_multiplier = 1.0
pad_to_multiple = 2
with self.assertRaises(ValueError):
_ = self._create_feature_extractor(depth_multiplier, pad_to_multiple)
def test_extract_features_raises_error_with_invalid_image_size(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name)
if __name__ == '__main__':
tf.test.main()
| 5,411 | 39.691729 | 80 | py |
models | models-master/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnetv1 FPN [1] based feature extractors for CenterNet[2] meta architecture.
[1]: https://arxiv.org/abs/1612.03144.
[2]: https://arxiv.org/abs/1904.07850.
"""
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures.center_net_meta_arch import CenterNetFeatureExtractor
from object_detection.models.keras_models import resnet_v1
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_18': ['conv2_block2_out', 'conv3_block2_out',
'conv4_block2_out', 'conv5_block2_out'],
'resnet_v1_34': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
}
class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor):
"""Resnet v1 FPN base feature extractor for the CenterNet model.
This feature extractor uses residual skip connections and nearest neighbor
upsampling to produce an output feature map of stride 4, which has precise
localization information along with strong semantic information from the top
of the net. This design does not exactly follow the original FPN design,
specifically:
- Since only one output map is necessary for heatmap prediction (stride 4
output), the top-down feature maps can have different numbers of channels.
Specifically, the top down feature maps have the following sizes:
[h/4, w/4, 64], [h/8, w/8, 128], [h/16, w/16, 256], [h/32, w/32, 256].
- No additional coarse features are used after conv5_x.
"""
def __init__(self, resnet_type, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes the feature extractor with a specific ResNet architecture.
Args:
      resnet_type: A string specifying which kind of ResNet to use. Currently
        `resnet_v1_18`, `resnet_v1_34`, `resnet_v1_50` and `resnet_v1_101` are
        supported.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
      bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetResnetV1FpnFeatureExtractor, self).__init__(
channel_means=channel_means, channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
if resnet_type == 'resnet_v1_50':
self._base_model = tf.keras.applications.ResNet50(weights=None,
include_top=False)
elif resnet_type == 'resnet_v1_101':
self._base_model = tf.keras.applications.ResNet101(weights=None,
include_top=False)
elif resnet_type == 'resnet_v1_18':
self._base_model = resnet_v1.resnet_v1_18(weights=None, include_top=False)
elif resnet_type == 'resnet_v1_34':
self._base_model = resnet_v1.resnet_v1_34(weights=None, include_top=False)
else:
raise ValueError('Unknown Resnet Model {}'.format(resnet_type))
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[resnet_type]
outputs = [self._base_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self._resnet_model = tf.keras.models.Model(inputs=self._base_model.input,
outputs=outputs)
resnet_outputs = self._resnet_model(self._base_model.input)
# Construct the top-down feature maps.
top_layer = resnet_outputs[-1]
residual_op = tf.keras.layers.Conv2D(filters=256, kernel_size=1,
strides=1, padding='same')
top_down = residual_op(top_layer)
num_filters_list = [256, 128, 64]
for i, num_filters in enumerate(num_filters_list):
level_ind = 2 - i
# Upsample.
upsample_op = tf.keras.layers.UpSampling2D(2, interpolation='nearest')
top_down = upsample_op(top_down)
# Residual (skip-connection) from bottom-up pathway.
residual_op = tf.keras.layers.Conv2D(filters=num_filters, kernel_size=1,
strides=1, padding='same')
residual = residual_op(resnet_outputs[level_ind])
# Merge.
top_down = top_down + residual
next_num_filters = num_filters_list[i+1] if i + 1 <= 2 else 64
conv = tf.keras.layers.Conv2D(filters=next_num_filters,
kernel_size=3, strides=1, padding='same')
top_down = conv(top_down)
top_down = tf.keras.layers.BatchNormalization()(top_down)
top_down = tf.keras.layers.ReLU()(top_down)
self._feature_extractor_model = tf.keras.models.Model(
inputs=self._base_model.input, outputs=top_down)
def preprocess(self, resized_inputs):
"""Preprocess input images for the ResNet model.
    This applies the Keras ResNet (v1) `preprocess_input` transform, i.e.
    RGB-to-BGR conversion and ImageNet channel-mean subtraction, to images in
    the range [0, 255].
Args:
resized_inputs: a [batch, height, width, channels] float32 tensor.
Returns:
outputs: a [batch, height, width, channels] float32 tensor.
"""
resized_inputs = super(
CenterNetResnetV1FpnFeatureExtractor, self).preprocess(resized_inputs)
return tf.keras.applications.resnet.preprocess_input(resized_inputs)
def load_feature_extractor_weights(self, path):
self._base_model.load_weights(path)
def call(self, inputs):
"""Returns image features extracted by the backbone.
Args:
inputs: An image tensor of shape [batch_size, input_height,
input_width, 3]
Returns:
features_list: A list of length 1 containing a tensor of shape
[batch_size, input_height // 4, input_width // 4, 64] containing
the features extracted by the ResNet.
"""
return [self._feature_extractor_model(inputs)]
@property
def num_feature_outputs(self):
return 1
@property
def out_stride(self):
return 4
@property
def classification_backbone(self):
return self._base_model
def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 101 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_101',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v1_50_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 50 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_50',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
def resnet_v1_34_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 34 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_34',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering
)
def resnet_v1_18_fpn(channel_means, channel_stds, bgr_ordering, **kwargs):
"""The ResNet v1 18 FPN feature extractor."""
del kwargs
return CenterNetResnetV1FpnFeatureExtractor(
resnet_type='resnet_v1_18',
channel_means=channel_means,
channel_stds=channel_stds,
bgr_ordering=bgr_ordering)
| 8,230 | 37.643192 | 94 | py |
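As a quick, hedged illustration of how the factory functions in the file above are typically used, the following sketch (not part of the original file) builds the ResNet-50 FPN CenterNet extractor and runs a dummy batch through it. It assumes a TF2 environment with the object_detection package importable; the batch size and the 512x512 input size are arbitrary choices.

# Hypothetical usage sketch for the CenterNet ResNet v1 FPN extractor.
import numpy as np
import tensorflow as tf

from object_detection.models import center_net_resnet_v1_fpn_feature_extractor as fpn

extractor = fpn.resnet_v1_50_fpn(
    channel_means=(0., 0., 0.),   # defaults; real configs may use ImageNet means
    channel_stds=(1., 1., 1.),
    bgr_ordering=False)

images = np.zeros((2, 512, 512, 3), dtype=np.float32)
preprocessed = extractor.preprocess(tf.constant(images))
features = extractor(preprocessed)   # list containing a single stride-4 feature map
print(features[0].shape)             # expected: (2, 128, 128, 64)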
models | models-master/research/object_detection/models/ssd_resnet_v1_fpn_keras_feature_extractor.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Keras-based ResnetV1 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import ops
from object_detection.utils import shape_utils
_RESNET_MODEL_OUTPUT_LAYERS = {
'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block6_out', 'conv5_block3_out'],
'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out',
'conv4_block23_out', 'conv5_block3_out'],
'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out',
'conv4_block36_out', 'conv5_block3_out'],
}
class SSDResNetV1FpnKerasFeatureExtractor(
ssd_meta_arch.SSDKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
resnet_v1_base_model,
resnet_v1_base_model_name,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name=None):
"""SSD Keras based FPN feature extractor Resnet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
        dependency on tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN construction uses feature maps starting from fpn_min_level
        up to the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNetV1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
if self._use_explicit_padding:
raise ValueError('Explicit padding is not a valid option.')
if self._use_depthwise:
raise ValueError('Depthwise is not a valid option.')
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
self.classification_backbone = None
self._fpn_features_generator = None
self._coarse_feature_layers = []
def build(self, input_shape):
full_resnet_v1_model = self._resnet_v1_base_model(
batchnorm_training=(self._is_training and not self._freeze_batchnorm),
conv_hyperparams=(self._conv_hyperparams
if self._override_base_feature_extractor_hyperparams
else None),
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
classes=None,
weights=None,
include_top=False)
output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name]
outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
for output_layer_name in output_layers]
self.classification_backbone = tf.keras.Model(
inputs=full_resnet_v1_model.inputs,
outputs=outputs)
# pylint:disable=g-long-lambda
self._depth_fn = lambda d: max(
int(d * self._depth_multiplier), self._min_depth)
self._base_fpn_max_level = min(self._fpn_max_level, 5)
self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level
self._fpn_features_generator = (
feature_map_generators.KerasFpnTopDownFeatureMaps(
num_levels=self._num_levels,
depth=self._depth_fn(self._additional_layer_depth),
is_training=self._is_training,
conv_hyperparams=self._conv_hyperparams,
freeze_batchnorm=self._freeze_batchnorm,
name='FeatureMaps'))
# Construct coarse feature layers
depth = self._depth_fn(self._additional_layer_depth)
for i in range(self._base_fpn_max_level, self._fpn_max_level):
layers = []
layer_name = 'bottom_up_block{}'.format(i)
layers.append(
tf.keras.layers.Conv2D(
depth,
[3, 3],
padding='SAME',
strides=2,
name=layer_name + '_conv',
**self._conv_hyperparams.params()))
layers.append(
self._conv_hyperparams.build_batch_norm(
training=(self._is_training and not self._freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
self._conv_hyperparams.build_activation_layer(
name=layer_name))
self._coarse_feature_layers.append(layers)
self.built = True
def preprocess(self, resized_inputs):
"""SSD preprocessing.
VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
129, preprocessed_inputs)
image_features = self.classification_backbone(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))
feature_block_list = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_block_list.append('block{}'.format(level - 1))
feature_block_map = dict(
list(zip(self._resnet_block_names, image_features)))
fpn_input_image_features = [
(feature_block, feature_block_map[feature_block])
for feature_block in feature_block_list]
fpn_features = self._fpn_features_generator(fpn_input_image_features)
feature_maps = []
for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_block{}'.format(level-1)])
last_feature_map = fpn_features['top_down_block{}'.format(
self._base_fpn_max_level - 1)]
for coarse_feature_layers in self._coarse_feature_layers:
for layer in coarse_feature_layers:
last_feature_map = layer(last_feature_map)
feature_maps.append(last_feature_map)
return feature_maps
class SSDResNet50V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-50 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet50V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-50 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
        dependency on tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet50V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDResNet101V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-101 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet101V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-101 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
        dependency on tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet101V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
class SSDResNet152V1FpnKerasFeatureExtractor(
SSDResNetV1FpnKerasFeatureExtractor):
"""SSD Feature Extractor using Keras-based ResnetV1-152 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
               use_explicit_padding=None,
use_depthwise=None,
override_base_feature_extractor_hyperparams=False,
name='ResNet152V1_FPN'):
"""SSD Keras based FPN feature extractor ResnetV1-152 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: a `hyperparams_builder.KerasLayerHyperparams` object
containing convolution hyperparameters for the layers added on top of
the base feature extractor.
freeze_batchnorm: whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: whether to update batch norm moving average
values inplace. When this is false train op must add a control
        dependency on tf.GraphKeys.UPDATE_OPS collection in order to update
batch norm statistics.
fpn_min_level: the minimum level in feature pyramid networks.
fpn_max_level: the maximum level in feature pyramid networks.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: whether to use explicit padding when extracting
features. Default is None, as it's an invalid option and not implemented
in this feature extractor.
use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams`.
name: a string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(SSDResNet152V1FpnKerasFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams,
name=name)
| 20,749 | 44.404814 | 80 | py |
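For orientation, here is a small, hedged construction sketch for the Keras ResNet-50 FPN SSD extractor defined in the file above. The conv_hyperparams text proto, the 256x256 input size, and pad_to_multiple=1 are illustrative assumptions rather than values taken from a shipped config; a TF2 environment with the object_detection protos and builders on the path is assumed.

# Hypothetical construction sketch; hyperparameters below are placeholders.
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_fpn
from object_detection.protos import hyperparams_pb2

hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
  batch_norm { scale: true }
""", hyperparams_proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(hyperparams_proto)

feature_extractor = ssd_fpn.SSDResNet50V1FpnKerasFeatureExtractor(
    is_training=False,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False,
    inplace_batchnorm_update=False)

images = tf.zeros([2, 256, 256, 3], dtype=tf.float32)
feature_maps = feature_extractor(feature_extractor.preprocess(images))
for fmap in feature_maps:
  print(fmap.shape)   # five maps, strides 8 through 128, each with 256 channels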
models | models-master/research/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet V1 Faster R-CNN implementation.
See "Deep Residual Learning for Image Recognition" by He et al., 2015.
https://arxiv.org/abs/1512.03385
Note: this implementation assumes that the classification checkpoint used
to finetune this model is trained using the same configuration as that of
the MSRA provided checkpoints
(see https://github.com/KaimingHe/deep-residual-networks), e.g., with
same preprocessing, batch norm scaling, etc.
"""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import resnet_utils
from nets import resnet_v1
class FasterRCNNResnetV1FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Resnet V1 feature extractor implementation."""
def __init__(self,
architecture,
resnet_model,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
activation_fn=tf.nn.relu):
"""Constructor.
Args:
architecture: Architecture name of the Resnet V1 model.
resnet_model: Definition of the Resnet V1 model.
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
      activation_fn: Activation function to use in the Resnet V1 model.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._architecture = architecture
self._resnet_model = resnet_model
self._activation_fn = activation_fn
super(FasterRCNNResnetV1FeatureExtractor,
self).__init__(is_training, first_stage_features_stride,
batch_norm_trainable, reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
activations: A dictionary mapping feature extractor tensor names to
tensors
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
shape_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
# Disables batchnorm for fine-tuning with smaller batch sizes.
# TODO(chensun): Figure out if it is needed when image
# batch size is bigger.
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=self._activation_fn,
weight_decay=self._weight_decay)):
with tf.variable_scope(
self._architecture, reuse=self._reuse_weights) as var_scope:
_, activations = self._resnet_model(
preprocessed_inputs,
num_classes=None,
is_training=self._train_batch_norm,
global_pool=False,
output_stride=self._first_stage_features_stride,
spatial_squeeze=False,
scope=var_scope)
handle = scope + '/%s/block3' % self._architecture
return activations[handle], activations
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
activation_fn=self._activation_fn,
weight_decay=self._weight_decay)):
with slim.arg_scope([slim.batch_norm],
is_training=self._train_batch_norm):
blocks = [
resnet_utils.Block('block4', resnet_v1.bottleneck, [{
'depth': 2048,
'depth_bottleneck': 512,
'stride': 1
}] * 3)
]
proposal_classifier_features = resnet_utils.stack_blocks_dense(
proposal_feature_maps, blocks)
return proposal_classifier_features
class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
activation_fn=tf.nn.relu):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
activation_fn: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet50FeatureExtractor,
self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay, activation_fn)
class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
activation_fn=tf.nn.relu):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
activation_fn: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet101FeatureExtractor,
self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay, activation_fn)
class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
activation_fn=tf.nn.relu):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
activation_fn: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet152FeatureExtractor,
self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay, activation_fn)
| 10,357 | 37.505576 | 80 | py |
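The following is a hedged, TF1-style sketch of how the ResNet-101 variant defined above is typically driven. It assumes tf_slim and the slim `nets` package are importable, and it builds the graph inside an explicit tf.Graph since this extractor targets graph mode; the input size and scope name are arbitrary.

# Hypothetical TF1 usage sketch for the Faster R-CNN ResNet feature extractor.
import tensorflow.compat.v1 as tf

from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet

with tf.Graph().as_default():
  extractor = frcnn_resnet.FasterRCNNResnet101FeatureExtractor(
      is_training=False, first_stage_features_stride=16)
  images = tf.zeros([1, 224, 224, 3], dtype=tf.float32)
  preprocessed = extractor.preprocess(images)
  rpn_features, _ = extractor.extract_proposal_features(
      preprocessed, scope='FirstStageFeatureExtractor')
  print(rpn_features.shape)   # stride-16 block3 output: (1, 14, 14, 1024)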
models | models-master/research/object_detection/models/faster_rcnn_pnas_feature_extractor_tf1_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.faster_rcnn_pnas_feature_extractor."""
import unittest
import tensorflow.compat.v1 as tf
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class FasterRcnnPNASFeatureExtractorTest(tf.test.TestCase):
def _build_feature_extractor(self, first_stage_features_stride):
return frcnn_pnas.FasterRCNNPNASFeatureExtractor(
is_training=False,
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0)
def test_extract_proposal_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 299, 299, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 19, 19, 4320])
def test_extract_proposal_features_input_size_224(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 224, 224, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 14, 14, 4320])
def test_extract_proposal_features_input_size_112(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[1, 112, 112, 3], maxval=255, dtype=tf.float32)
rpn_feature_map, _ = feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
features_shape = tf.shape(rpn_feature_map)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [1, 7, 7, 4320])
def test_extract_proposal_features_dies_on_invalid_stride(self):
with self.assertRaises(ValueError):
self._build_feature_extractor(first_stage_features_stride=99)
def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
preprocessed_inputs = tf.random_uniform(
[224, 224, 3], maxval=255, dtype=tf.float32)
with self.assertRaises(ValueError):
feature_extractor.extract_proposal_features(
preprocessed_inputs, scope='TestScope')
def test_extract_box_classifier_features_returns_expected_size(self):
feature_extractor = self._build_feature_extractor(
first_stage_features_stride=16)
proposal_feature_maps = tf.random_uniform(
[2, 17, 17, 1088], maxval=255, dtype=tf.float32)
proposal_classifier_features = (
feature_extractor.extract_box_classifier_features(
proposal_feature_maps, scope='TestScope'))
features_shape = tf.shape(proposal_classifier_features)
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
features_shape_out = sess.run(features_shape)
self.assertAllEqual(features_shape_out, [2, 9, 9, 4320])
def test_filter_scaling_computation(self):
expected_filter_scaling = {
((4, 8), 2): 1.0,
((4, 8), 7): 2.0,
((4, 8), 8): 2.0,
((4, 8), 9): 4.0
}
for args, filter_scaling in expected_filter_scaling.items():
reduction_indices, start_cell_num = args
self.assertAlmostEqual(
frcnn_pnas._filter_scaling(reduction_indices, start_cell_num),
filter_scaling)
if __name__ == '__main__':
tf.test.main()
| 5,086 | 39.696 | 84 | py |
models | models-master/research/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing ResNet v2 models for the CenterNet meta architecture."""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetResnetFeatureExtractorTest(test_case.TestCase):
def test_output_size(self):
"""Verify that shape of features returned by the backbone is correct."""
model = center_net_resnet_feature_extractor.\
CenterNetResnetFeatureExtractor('resnet_v2_101')
def graph_fn():
img = np.zeros((8, 512, 512, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 128, 128, 64))
def test_output_size_resnet50(self):
"""Verify that shape of features returned by the backbone is correct."""
model = center_net_resnet_feature_extractor.\
CenterNetResnetFeatureExtractor('resnet_v2_50')
def graph_fn():
img = np.zeros((8, 224, 224, 3), dtype=np.float32)
processed_img = model.preprocess(img)
return model(processed_img)
outputs = self.execute(graph_fn, [])
self.assertEqual(outputs.shape, (8, 56, 56, 64))
if __name__ == '__main__':
tf.test.main()
| 2,124 | 37.636364 | 80 | py |
models | models-master/research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD MobilenetV1 FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
# A modified config of mobilenet v1 that makes it more detection friendly.
def _create_modified_mobilenet_config():
conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS)
conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512)
conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256)
return conv_defs
class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 FPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=7,
additional_layer_depth=256,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False):
"""SSD FPN feature extractor based on Mobilenet v1 architecture.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
fpn_min_level: the highest resolution feature map to use in FPN. The valid
values are {2, 3, 4, 5} which map to MobileNet v1 layers
{Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise,
Conv2d_13_pointwise}, respectively.
fpn_max_level: the smallest resolution feature map to construct or use in
      FPN. FPN construction uses feature maps starting from fpn_min_level
      up to the fpn_max_level. In the case that there are not enough feature
maps in the backbone network, additional feature maps are created by
applying stride 2 convolutions until we get the desired number of fpn
levels.
additional_layer_depth: additional feature map layer channel depth.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize
to do upsampling in FPN. Default is false.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
"""
super(SSDMobileNetV1FpnFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=
override_base_feature_extractor_hyperparams)
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._additional_layer_depth = additional_layer_depth
self._conv_defs = None
if self._use_depthwise:
self._conv_defs = _create_modified_mobilenet_config()
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
conv_defs=self._conv_defs,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
with slim.arg_scope(self._conv_hyperparams_fn()):
with tf.variable_scope('fpn', reuse=self._reuse_weights):
feature_blocks = [
'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
'Conv2d_13_pointwise'
]
base_fpn_max_level = min(self._fpn_max_level, 5)
feature_block_list = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_block_list.append(feature_blocks[level - 2])
fpn_features = feature_map_generators.fpn_top_down_feature_maps(
[(key, image_features[key]) for key in feature_block_list],
depth=depth_fn(self._additional_layer_depth),
use_depthwise=self._use_depthwise,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op)
feature_maps = []
for level in range(self._fpn_min_level, base_fpn_max_level + 1):
feature_maps.append(fpn_features['top_down_{}'.format(
feature_blocks[level - 2])])
last_feature_map = fpn_features['top_down_{}'.format(
feature_blocks[base_fpn_max_level - 2])]
# Construct coarse features
padding = 'VALID' if self._use_explicit_padding else 'SAME'
kernel_size = 3
for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
if self._use_depthwise:
conv_op = functools.partial(
slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
if self._use_explicit_padding:
last_feature_map = ops.fixed_padding(
last_feature_map, kernel_size)
last_feature_map = conv_op(
last_feature_map,
num_outputs=depth_fn(self._additional_layer_depth),
kernel_size=[kernel_size, kernel_size],
stride=2,
padding=padding,
scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
feature_maps.append(last_feature_map)
return feature_maps
| 8,906 | 43.094059 | 80 | py |
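To round out the slim-based extractors, here is a hedged construction sketch for the MobileNet v1 FPN SSD feature extractor above. The hyperparameter proto, input size, and pad_to_multiple value are illustrative assumptions; a TF1-compatible environment with tf_slim and the slim nets package is assumed, and the graph is built explicitly.

# Hypothetical TF1 construction sketch; hyperparameters below are placeholders.
import tensorflow.compat.v1 as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor as mnet_fpn
from object_detection.protos import hyperparams_pb2

hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { stddev: 0.03 } }
  activation: RELU_6
  batch_norm { scale: true }
""", hyperparams_proto)
conv_hyperparams_fn = hyperparams_builder.build(hyperparams_proto, is_training=False)

with tf.Graph().as_default():
  extractor = mnet_fpn.SSDMobileNetV1FpnFeatureExtractor(
      is_training=False,
      depth_multiplier=1.0,
      min_depth=16,
      pad_to_multiple=1,
      conv_hyperparams_fn=conv_hyperparams_fn)
  images = tf.zeros([2, 256, 256, 3], dtype=tf.float32)
  feature_maps = extractor.extract_features(extractor.preprocess(images))
  for fmap in feature_maps:
    print(fmap.shape)   # five maps from stride 8 (32x32) down to stride 128 (2x2)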