Dataset columns (type, observed range), in the order they appear in each record below:
  repo             string, lengths 2-99
  file             string, lengths 13-225
  code             string, lengths 0-18.3M
  file_length      int64, 0-18.3M
  avg_line_length  float64, 0-1.36M
  max_line_length  int64, 0-4.26M
  extension_type   string, 1 distinct class
models
models-master/research/cognitive_planning/preprocessing/vgg_preprocessing.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides utilities to preprocess images. The preprocessing steps for VGG were introduced in the following technical report: Very Deep Convolutional Networks For Large-Scale Image Recognition Karen Simonyan and Andrew Zisserman arXiv technical report, 2015 PDF: http://arxiv.org/pdf/1409.1556.pdf ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf CC-BY-4.0 More information can be obtained from the VGG website: www.robots.ox.ac.uk/~vgg/research/very_deep/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 _RESIZE_SIDE_MIN = 256 _RESIZE_SIDE_MAX = 512 def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: an image of shape [height, width, channels]. offset_height: a scalar tensor indicating the height offset. offset_width: a scalar tensor indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.']) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.']) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.reshape(image, cropped_shape) def _random_crop(image_list, crop_height, crop_width): """Crops the given list of images. The function applies the same crop to each image in the list. This can be effectively applied when there are multiple image inputs of the same dimension such as: image, depths, normals = _random_crop([image, depths, normals], 120, 150) Args: image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the new height. crop_width: the new width. Returns: the image_list with cropped images. Raises: ValueError: if there are multiple image inputs provided with different size or the images are smaller than the crop dimensions. 
""" if not image_list: raise ValueError('Empty image_list.') # Compute the rank assertions. rank_assertions = [] for i in range(len(image_list)): image_rank = tf.rank(image_list[i]) rank_assert = tf.Assert( tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank]) rank_assertions.append(rank_assert) with tf.control_dependencies([rank_assertions[0]]): image_shape = tf.shape(image_list[0]) image_height = image_shape[0] image_width = image_shape[1] crop_size_assert = tf.Assert( tf.logical_and( tf.greater_equal(image_height, crop_height), tf.greater_equal(image_width, crop_width)), ['Crop size greater than the image size.']) asserts = [rank_assertions[0], crop_size_assert] for i in range(1, len(image_list)): image = image_list[i] asserts.append(rank_assertions[i]) with tf.control_dependencies([rank_assertions[i]]): shape = tf.shape(image) height = shape[0] width = shape[1] height_assert = tf.Assert( tf.equal(height, image_height), ['Wrong height for tensor %s [expected][actual]', image.name, height, image_height]) width_assert = tf.Assert( tf.equal(width, image_width), ['Wrong width for tensor %s [expected][actual]', image.name, width, image_width]) asserts.extend([height_assert, width_assert]) # Create a random bounding box. # # Use tf.random_uniform and not numpy.random.rand as doing the former would # generate random numbers at graph eval time, unlike the latter which # generates random numbers at graph definition time. with tf.control_dependencies(asserts): max_offset_height = tf.reshape(image_height - crop_height + 1, []) with tf.control_dependencies(asserts): max_offset_width = tf.reshape(image_width - crop_width + 1, []) offset_height = tf.random_uniform( [], maxval=max_offset_height, dtype=tf.int32) offset_width = tf.random_uniform( [], maxval=max_offset_width, dtype=tf.int32) return [_crop(image, offset_height, offset_width, crop_height, crop_width) for image in image_list] def _central_crop(image_list, crop_height, crop_width): """Performs central crops of the given image list. Args: image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: the list of cropped images. """ outputs = [] for image in image_list: image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = (image_height - crop_height) / 2 offset_width = (image_width - crop_width) / 2 outputs.append(_crop(image, offset_height, offset_width, crop_height, crop_width)) return outputs def _mean_image_subtraction(image, means): """Subtracts the given means from each image channel. For example: means = [123.68, 116.779, 103.939] image = _mean_image_subtraction(image, means) Note that the rank of `image` must be known. Args: image: a tensor of size [height, width, C]. means: a C-vector of values to subtract from each channel. Returns: the centered image. Raises: ValueError: If the rank of `image` is unknown, if `image` has a rank other than three or if the number of channels in `image` doesn't match the number of values in `means`. 
""" if image.get_shape().ndims != 3: raise ValueError('Input must be of size [height, width, C>0]') num_channels = image.get_shape().as_list()[-1] if len(means) != num_channels: raise ValueError('len(means) must match the number of channels') channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) for i in range(num_channels): channels[i] -= means[i] return tf.concat(axis=2, values=channels) def _smallest_size_at_least(height, width, smallest_side): """Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width. """ smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) height = tf.to_float(height) width = tf.to_float(width) smallest_side = tf.to_float(smallest_side) scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height) new_height = tf.to_int32(tf.rint(height * scale)) new_width = tf.to_int32(tf.rint(width * scale)) return new_height, new_width def _aspect_preserving_resize(image, smallest_side): """Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. """ smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) shape = tf.shape(image) height = shape[0] width = shape[1] new_height, new_width = _smallest_size_at_least(height, width, smallest_side) image = tf.expand_dims(image, 0) resized_image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False) resized_image = tf.squeeze(resized_image) resized_image.set_shape([None, None, 3]) return resized_image def preprocess_for_train(image, output_height, output_width, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX): """Preprocesses the given image for training. Note that the actual resizing scale is sampled from [`resize_size_min`, `resize_size_max`]. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. resize_side_min: The lower bound for the smallest side of the image for aspect-preserving resizing. resize_side_max: The upper bound for the smallest side of the image for aspect-preserving resizing. Returns: A preprocessed image. """ resize_side = tf.random_uniform( [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32) image = _aspect_preserving_resize(image, resize_side) image = _random_crop([image], output_height, output_width)[0] image.set_shape([output_height, output_width, 3]) image = tf.to_float(image) image = tf.image.random_flip_left_right(image) return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) def preprocess_for_eval(image, output_height, output_width, resize_side): """Preprocesses the given image for evaluation. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. 
output_width: The width of the image after preprocessing. resize_side: The smallest side of the image for aspect-preserving resizing. Returns: A preprocessed image. """ image = _aspect_preserving_resize(image, resize_side) image = _central_crop([image], output_height, output_width)[0] image.set_shape([output_height, output_width, 3]) image = tf.to_float(image) return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) def preprocess_image(image, output_height, output_width, is_training=False, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX): """Preprocesses the given image. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. is_training: `True` if we're preprocessing the image for training and `False` otherwise. resize_side_min: The lower bound for the smallest side of the image for aspect-preserving resizing. If `is_training` is `False`, then this value is used for rescaling. resize_side_max: The upper bound for the smallest side of the image for aspect-preserving resizing. If `is_training` is `False`, this value is ignored. Otherwise, the resize side is sampled from [resize_size_min, resize_size_max]. Returns: A preprocessed image. """ if is_training: return preprocess_for_train(image, output_height, output_width, resize_side_min, resize_side_max) else: return preprocess_for_eval(image, output_height, output_width, resize_side_min)
12,923
34.311475
80
py
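Since this record only shows the library code, here is a minimal usage sketch of the VGG preprocessing defined above. It assumes TF 1.x graph mode, a placeholder input, and an illustrative 224x224 output size; the function names and defaults are the ones in the file itself.

# Sketch only: TF 1.x graph mode; input placeholder and 224x224 size are assumptions.
import tensorflow as tf
from preprocessing import vgg_preprocessing

raw_image = tf.placeholder(tf.uint8, shape=[None, None, 3])  # decoded RGB image
# Training path: resize so the smallest side falls in [256, 512], random crop,
# random horizontal flip, then subtract the per-channel VGG means.
train_image = vgg_preprocessing.preprocess_image(
    raw_image, 224, 224, is_training=True)
# Evaluation path: deterministic aspect-preserving resize and central crop.
eval_image = vgg_preprocessing.preprocess_image(
    raw_image, 224, 224, is_training=False)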
models
models-master/research/cognitive_planning/preprocessing/preprocessing_factory.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains a factory for building various models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from preprocessing import cifarnet_preprocessing from preprocessing import inception_preprocessing from preprocessing import lenet_preprocessing from preprocessing import vgg_preprocessing slim = tf.contrib.slim def get_preprocessing(name, is_training=False): """Returns preprocessing_fn(image, height, width, **kwargs). Args: name: The name of the preprocessing function. is_training: `True` if the model is being used for training and `False` otherwise. Returns: preprocessing_fn: A function that preprocessing a single image (pre-batch). It has the following signature: image = preprocessing_fn(image, output_height, output_width, ...). Raises: ValueError: If Preprocessing `name` is not recognized. """ preprocessing_fn_map = { 'cifarnet': cifarnet_preprocessing, 'inception': inception_preprocessing, 'inception_v1': inception_preprocessing, 'inception_v2': inception_preprocessing, 'inception_v3': inception_preprocessing, 'inception_v4': inception_preprocessing, 'inception_resnet_v2': inception_preprocessing, 'lenet': lenet_preprocessing, 'mobilenet_v1': inception_preprocessing, 'nasnet_mobile': inception_preprocessing, 'nasnet_large': inception_preprocessing, 'pnasnet_large': inception_preprocessing, 'resnet_v1_50': vgg_preprocessing, 'resnet_v1_101': vgg_preprocessing, 'resnet_v1_152': vgg_preprocessing, 'resnet_v1_200': vgg_preprocessing, 'resnet_v2_50': vgg_preprocessing, 'resnet_v2_101': vgg_preprocessing, 'resnet_v2_152': vgg_preprocessing, 'resnet_v2_200': vgg_preprocessing, 'vgg': vgg_preprocessing, 'vgg_a': vgg_preprocessing, 'vgg_16': vgg_preprocessing, 'vgg_19': vgg_preprocessing, } if name not in preprocessing_fn_map: raise ValueError('Preprocessing name [%s] was not recognized' % name) def preprocessing_fn(image, output_height, output_width, **kwargs): return preprocessing_fn_map[name].preprocess_image( image, output_height, output_width, is_training=is_training, **kwargs) return preprocessing_fn
3,036
36.036585
80
py
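A short sketch of how this factory is typically consumed. The 'vgg_16' key comes from the map in the file above; the placeholder input and 224x224 size are illustrative assumptions.

# Sketch only: TF 1.x; the chosen model name maps to vgg_preprocessing above.
import tensorflow as tf
from preprocessing import preprocessing_factory

image = tf.placeholder(tf.uint8, shape=[None, None, 3])
preprocessing_fn = preprocessing_factory.get_preprocessing('vgg_16', is_training=False)
# preprocessing_fn(image, output_height, output_width, ...) per the docstring above.
processed_image = preprocessing_fn(image, 224, 224)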
models
models-master/research/cognitive_planning/preprocessing/__init__.py
1
0
0
py
models
models-master/research/cognitive_planning/preprocessing/cifarnet_preprocessing.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides utilities to preprocess images in CIFAR-10. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf _PADDING = 4 slim = tf.contrib.slim def preprocess_for_train(image, output_height, output_width, padding=_PADDING, add_image_summaries=True): """Preprocesses the given image for training. Note that the actual resizing scale is sampled from [`resize_size_min`, `resize_size_max`]. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. padding: The amound of padding before and after each dimension of the image. add_image_summaries: Enable image summaries. Returns: A preprocessed image. """ if add_image_summaries: tf.summary.image('image', tf.expand_dims(image, 0)) # Transform the image to floats. image = tf.to_float(image) if padding > 0: image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]]) # Randomly crop a [height, width] section of the image. distorted_image = tf.random_crop(image, [output_height, output_width, 3]) # Randomly flip the image horizontally. distorted_image = tf.image.random_flip_left_right(distorted_image) if add_image_summaries: tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0)) # Because these operations are not commutative, consider randomizing # the order their operation. distorted_image = tf.image.random_brightness(distorted_image, max_delta=63) distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8) # Subtract off the mean and divide by the variance of the pixels. return tf.image.per_image_standardization(distorted_image) def preprocess_for_eval(image, output_height, output_width, add_image_summaries=True): """Preprocesses the given image for evaluation. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. add_image_summaries: Enable image summaries. Returns: A preprocessed image. """ if add_image_summaries: tf.summary.image('image', tf.expand_dims(image, 0)) # Transform the image to floats. image = tf.to_float(image) # Resize and crop if needed. resized_image = tf.image.resize_image_with_crop_or_pad(image, output_width, output_height) if add_image_summaries: tf.summary.image('resized_image', tf.expand_dims(resized_image, 0)) # Subtract off the mean and divide by the variance of the pixels. return tf.image.per_image_standardization(resized_image) def preprocess_image(image, output_height, output_width, is_training=False, add_image_summaries=True): """Preprocesses the given image. Args: image: A `Tensor` representing an image of arbitrary size. 
output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. is_training: `True` if we're preprocessing the image for training and `False` otherwise. add_image_summaries: Enable image summaries. Returns: A preprocessed image. """ if is_training: return preprocess_for_train( image, output_height, output_width, add_image_summaries=add_image_summaries) else: return preprocess_for_eval( image, output_height, output_width, add_image_summaries=add_image_summaries)
4,650
35.054264
80
py
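A hedged usage sketch for the CIFAR-10 preprocessing above, assuming a single 32x32 uint8 image and keeping the output at the native CIFAR size; summaries are disabled only to keep the example minimal.

# Sketch only: TF 1.x; a single 32x32x3 CIFAR-10 image is assumed.
import tensorflow as tf
from preprocessing import cifarnet_preprocessing

image = tf.placeholder(tf.uint8, shape=[32, 32, 3])
# Pads by 4 pixels, takes a random 32x32 crop, flips, distorts color, standardizes.
distorted = cifarnet_preprocessing.preprocess_image(
    image, 32, 32, is_training=True, add_image_summaries=False)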
models
models-master/research/cognitive_planning/preprocessing/lenet_preprocessing.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities for preprocessing."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim


def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
1,540
33.244444
80
py
models
models-master/research/efficient-hrl/run_eval.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Script for evaluating a UVF agent.

To run locally: See scripts/local_eval.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import gin.tf
# pylint: disable=unused-import
import eval as eval_
# pylint: enable=unused-import

flags = tf.app.flags
FLAGS = flags.FLAGS


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  assert FLAGS.checkpoint_dir, "Flag 'checkpoint_dir' must be set."
  assert FLAGS.eval_dir, "Flag 'eval_dir' must be set."
  if FLAGS.config_file:
    for config_file in FLAGS.config_file:
      gin.parse_config_file(config_file)
  if FLAGS.params:
    gin.parse_config(FLAGS.params)
  eval_.evaluate(FLAGS.checkpoint_dir, FLAGS.eval_dir)


if __name__ == "__main__":
  tf.app.run()
1,496
27.788462
80
py
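A hedged sketch of what run_eval.main does when driven directly rather than via tf.app.run; the gin config path and the two directories are placeholders, not paths defined by this repo snapshot.

# Sketch only: mirrors the body of main() above; paths are placeholders.
import gin.tf
import eval as eval_

gin.parse_config_file('/path/to/uvf_config.gin')     # placeholder gin config
eval_.evaluate('/tmp/uvf/train', '/tmp/uvf/eval')    # checkpoint_dir, eval_dir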
models
models-master/research/efficient-hrl/cond_fn.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Defines many boolean functions indicating when to step and reset. """ import tensorflow as tf import gin.tf @gin.configurable def env_transition(agent, state, action, transition_type, environment_steps, num_episodes): """True if the transition_type is TRANSITION or FINAL_TRANSITION. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. Returns: cond: Returns an op that evaluates to true if the transition type is not RESTARTING """ del agent, state, action, num_episodes, environment_steps cond = tf.logical_not(transition_type) return cond @gin.configurable def env_restart(agent, state, action, transition_type, environment_steps, num_episodes): """True if the transition_type is RESTARTING. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. Returns: cond: Returns an op that evaluates to true if the transition type equals RESTARTING. """ del agent, state, action, num_episodes, environment_steps cond = tf.identity(transition_type) return cond @gin.configurable def every_n_steps(agent, state, action, transition_type, environment_steps, num_episodes, n=150): """True once every n steps. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. n: Return true once every n steps. Returns: cond: Returns an op that evaluates to true if environment_steps equals 0 mod n. We increment the step before checking this condition, so we do not need to add one to environment_steps. """ del agent, state, action, transition_type, num_episodes cond = tf.equal(tf.mod(environment_steps, n), 0) return cond @gin.configurable def every_n_episodes(agent, state, action, transition_type, environment_steps, num_episodes, n=2, steps_per_episode=None): """True once every n episodes. Specifically, evaluates to True on the 0th step of every nth episode. Unlike environment_steps, num_episodes starts at 0, so we do want to add one to ensure it does not reset on the first call. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. n: Return true once every n episodes. steps_per_episode: How many steps per episode. Needed to determine when a new episode starts. 
Returns: cond: Returns an op that evaluates to true on the last step of the episode (i.e. if num_episodes equals 0 mod n). """ assert steps_per_episode is not None del agent, action, transition_type ant_fell = tf.logical_or(state[2] < 0.2, state[2] > 1.0) cond = tf.logical_and( tf.logical_or( ant_fell, tf.equal(tf.mod(num_episodes + 1, n), 0)), tf.equal(tf.mod(environment_steps, steps_per_episode), 0)) return cond @gin.configurable def failed_reset_after_n_episodes(agent, state, action, transition_type, environment_steps, num_episodes, steps_per_episode=None, reset_state=None, max_dist=1.0, epsilon=1e-10): """Every n episodes, returns True if the reset agent fails to return. Specifically, evaluates to True if the distance between the state and the reset state is greater than max_dist at the end of the episode. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. steps_per_episode: How many steps per episode. Needed to determine when a new episode starts. reset_state: State to which the reset controller should return. max_dist: Agent is considered to have successfully reset if its distance from the reset_state is less than max_dist. epsilon: small offset to ensure non-negative/zero distance. Returns: cond: Returns an op that evaluates to true if num_episodes+1 equals 0 mod n. We add one to the num_episodes so the environment is not reset after the 0th step. """ assert steps_per_episode is not None assert reset_state is not None del agent, state, action, transition_type, num_episodes dist = tf.sqrt( tf.reduce_sum(tf.squared_difference(state, reset_state)) + epsilon) cond = tf.logical_and( tf.greater(dist, tf.constant(max_dist)), tf.equal(tf.mod(environment_steps, steps_per_episode), 0)) return cond @gin.configurable def q_too_small(agent, state, action, transition_type, environment_steps, num_episodes, q_min=0.5): """True of q is too small. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. q_min: Returns true if the qval is less than q_min Returns: cond: Returns an op that evaluates to true if qval is less than q_min. """ del transition_type, environment_steps, num_episodes state_for_reset_agent = tf.stack(state[:-1], tf.constant([0], dtype=tf.float)) qval = agent.BASE_AGENT_CLASS.critic_net( tf.expand_dims(state_for_reset_agent, 0), tf.expand_dims(action, 0))[0, :] cond = tf.greater(tf.constant(q_min), qval) return cond @gin.configurable def true_fn(agent, state, action, transition_type, environment_steps, num_episodes): """Returns an op that evaluates to true. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. Returns: cond: op that always evaluates to True. """ del agent, state, action, transition_type, environment_steps, num_episodes cond = tf.constant(True, dtype=tf.bool) return cond @gin.configurable def false_fn(agent, state, action, transition_type, environment_steps, num_episodes): """Returns an op that evaluates to false. Args: agent: RL agent. state: A [num_state_dims] tensor representing a state. 
action: Action performed. transition_type: Type of transition after action environment_steps: Number of steps performed by environment. num_episodes: Number of episodes. Returns: cond: op that always evaluates to False. """ del agent, state, action, transition_type, environment_steps, num_episodes cond = tf.constant(False, dtype=tf.bool) return cond
8,659
34.346939
80
py
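A small illustration of one condition function from the file above (TF 1.x session assumed): every_n_steps only inspects the step counter, so the agent/state/action arguments can be left as None here.

# Sketch only: evaluates every_n_steps on a constant step counter.
import tensorflow as tf
import cond_fn

environment_steps = tf.constant(300, dtype=tf.int32)
cond = cond_fn.every_n_steps(
    agent=None, state=None, action=None, transition_type=None,
    environment_steps=environment_steps, num_episodes=None, n=150)
with tf.Session() as sess:
  print(sess.run(cond))  # True, because 300 % 150 == 0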
models
models-master/research/efficient-hrl/run_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Random policy on an environment.""" import tensorflow as tf import numpy as np import random from environments import create_maze_env app = tf.app flags = tf.flags logging = tf.logging FLAGS = flags.FLAGS flags.DEFINE_string('env', 'AntMaze', 'environment name: AntMaze, AntPush, or AntFall') flags.DEFINE_integer('episode_length', 500, 'episode length') flags.DEFINE_integer('num_episodes', 50, 'number of episodes') def get_goal_sample_fn(env_name): if env_name == 'AntMaze': # NOTE: When evaluating (i.e. the metrics shown in the paper, # we use the commented out goal sampling function. The uncommented # one is only used for training. #return lambda: np.array([0., 16.]) return lambda: np.random.uniform((-4, -4), (20, 20)) elif env_name == 'AntPush': return lambda: np.array([0., 19.]) elif env_name == 'AntFall': return lambda: np.array([0., 27., 4.5]) else: assert False, 'Unknown env' def get_reward_fn(env_name): if env_name == 'AntMaze': return lambda obs, goal: -np.sum(np.square(obs[:2] - goal)) ** 0.5 elif env_name == 'AntPush': return lambda obs, goal: -np.sum(np.square(obs[:2] - goal)) ** 0.5 elif env_name == 'AntFall': return lambda obs, goal: -np.sum(np.square(obs[:3] - goal)) ** 0.5 else: assert False, 'Unknown env' def success_fn(last_reward): return last_reward > -5.0 class EnvWithGoal(object): def __init__(self, base_env, env_name): self.base_env = base_env self.goal_sample_fn = get_goal_sample_fn(env_name) self.reward_fn = get_reward_fn(env_name) self.goal = None def reset(self): obs = self.base_env.reset() self.goal = self.goal_sample_fn() return np.concatenate([obs, self.goal]) def step(self, a): obs, _, done, info = self.base_env.step(a) reward = self.reward_fn(obs, self.goal) return np.concatenate([obs, self.goal]), reward, done, info @property def action_space(self): return self.base_env.action_space def run_environment(env_name, episode_length, num_episodes): env = EnvWithGoal( create_maze_env.create_maze_env(env_name).gym, env_name) def action_fn(obs): action_space = env.action_space action_space_mean = (action_space.low + action_space.high) / 2.0 action_space_magn = (action_space.high - action_space.low) / 2.0 random_action = (action_space_mean + action_space_magn * np.random.uniform(low=-1.0, high=1.0, size=action_space.shape)) return random_action rewards = [] successes = [] for ep in range(num_episodes): rewards.append(0.0) successes.append(False) obs = env.reset() for _ in range(episode_length): obs, reward, done, _ = env.step(action_fn(obs)) rewards[-1] += reward successes[-1] = success_fn(reward) if done: break logging.info('Episode %d reward: %.2f, Success: %d', ep + 1, rewards[-1], successes[-1]) logging.info('Average Reward over %d episodes: %.2f', num_episodes, np.mean(rewards)) logging.info('Average Success over %d episodes: %.2f', num_episodes, np.mean(successes)) def main(unused_argv): 
logging.set_verbosity(logging.INFO) run_environment(FLAGS.env, FLAGS.episode_length, FLAGS.num_episodes) if __name__ == '__main__': app.run()
4,062
30.253846
92
py
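A minimal sketch of driving the random policy above without tf.app.run; it assumes the efficient-hrl environments (create_maze_env and its MuJoCo dependencies) import cleanly, and the episode counts are illustrative.

# Sketch only: runs the random policy on AntMaze for a few short episodes.
import run_env

run_env.run_environment('AntMaze', episode_length=500, num_episodes=5)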
models
models-master/research/efficient-hrl/train_utils.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple import os import time import tensorflow as tf import gin.tf flags = tf.app.flags flags.DEFINE_multi_string('config_file', None, 'List of paths to the config files.') flags.DEFINE_multi_string('params', None, 'Newline separated list of Gin parameter bindings.') flags.DEFINE_string('train_dir', None, 'Directory for writing logs/summaries during training.') flags.DEFINE_string('master', 'local', 'BNS name of the TensorFlow master to use.') flags.DEFINE_integer('task', 0, 'task id') flags.DEFINE_integer('save_interval_secs', 300, 'The frequency at which ' 'checkpoints are saved, in seconds.') flags.DEFINE_integer('save_summaries_secs', 30, 'The frequency at which ' 'summaries are saved, in seconds.') flags.DEFINE_boolean('summarize_gradients', False, 'Whether to generate gradient summaries.') FLAGS = flags.FLAGS TrainOps = namedtuple('TrainOps', ['train_op', 'meta_train_op', 'collect_experience_op']) class TrainStep(object): """Handles training step.""" def __init__(self, max_number_of_steps=0, num_updates_per_observation=1, num_collect_per_update=1, num_collect_per_meta_update=1, log_every_n_steps=1, policy_save_fn=None, save_policy_every_n_steps=0, should_stop_early=None): """Returns a function that is executed at each step of slim training. Args: max_number_of_steps: Optional maximum number of train steps to take. num_updates_per_observation: Number of updates per observation. log_every_n_steps: The frequency, in terms of global steps, that the loss and global step and logged. policy_save_fn: A tf.Saver().save function to save the policy. save_policy_every_n_steps: How frequently to save the policy. should_stop_early: Optional hook to report whether training should stop. Raises: ValueError: If policy_save_fn is not provided when save_policy_every_n_steps > 0. """ if save_policy_every_n_steps and policy_save_fn is None: raise ValueError( 'policy_save_fn is required when save_policy_every_n_steps > 0') self.max_number_of_steps = max_number_of_steps self.num_updates_per_observation = num_updates_per_observation self.num_collect_per_update = num_collect_per_update self.num_collect_per_meta_update = num_collect_per_meta_update self.log_every_n_steps = log_every_n_steps self.policy_save_fn = policy_save_fn self.save_policy_every_n_steps = save_policy_every_n_steps self.should_stop_early = should_stop_early self.last_global_step_val = 0 self.train_op_fn = None self.collect_and_train_fn = None tf.logging.info('Training for %d max_number_of_steps', self.max_number_of_steps) def train_step(self, sess, train_ops, global_step, _): """This function will be called at each step of training. This represents one step of the DDPG algorithm and can include: 1. 
collect a <state, action, reward, next_state> transition 2. update the target network 3. train the actor 4. train the critic Args: sess: A Tensorflow session. train_ops: A DdpgTrainOps tuple of train ops to run. global_step: The global step. Returns: A scalar total loss. A boolean should stop. """ start_time = time.time() if self.train_op_fn is None: self.train_op_fn = sess.make_callable([train_ops.train_op, global_step]) self.meta_train_op_fn = sess.make_callable([train_ops.meta_train_op, global_step]) self.collect_fn = sess.make_callable([train_ops.collect_experience_op, global_step]) self.collect_and_train_fn = sess.make_callable( [train_ops.train_op, global_step, train_ops.collect_experience_op]) self.collect_and_meta_train_fn = sess.make_callable( [train_ops.meta_train_op, global_step, train_ops.collect_experience_op]) for _ in range(self.num_collect_per_update - 1): self.collect_fn() for _ in range(self.num_updates_per_observation - 1): self.train_op_fn() total_loss, global_step_val, _ = self.collect_and_train_fn() if (global_step_val // self.num_collect_per_meta_update != self.last_global_step_val // self.num_collect_per_meta_update): self.meta_train_op_fn() time_elapsed = time.time() - start_time should_stop = False if self.max_number_of_steps: should_stop = global_step_val >= self.max_number_of_steps if global_step_val != self.last_global_step_val: if (self.save_policy_every_n_steps and global_step_val // self.save_policy_every_n_steps != self.last_global_step_val // self.save_policy_every_n_steps): self.policy_save_fn(sess) if (self.log_every_n_steps and global_step_val % self.log_every_n_steps == 0): tf.logging.info( 'global step %d: loss = %.4f (%.3f sec/step) (%d steps/sec)', global_step_val, total_loss, time_elapsed, 1 / time_elapsed) self.last_global_step_val = global_step_val stop_early = bool(self.should_stop_early and self.should_stop_early()) return total_loss, should_stop or stop_early def create_counter_summaries(counters): """Add named summaries to counters, a list of tuples (name, counter).""" if counters: with tf.name_scope('Counters/'): for name, counter in counters: tf.summary.scalar(name, counter) def gen_debug_batch_summaries(batch): """Generates summaries for the sampled replay batch.""" states, actions, rewards, _, next_states = batch with tf.name_scope('batch'): for s in range(states.get_shape()[-1]): tf.summary.histogram('states_%d' % s, states[:, s]) for s in range(states.get_shape()[-1]): tf.summary.histogram('next_states_%d' % s, next_states[:, s]) for a in range(actions.get_shape()[-1]): tf.summary.histogram('actions_%d' % a, actions[:, a]) tf.summary.histogram('rewards', rewards)
7,027
38.931818
90
py
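A hedged example of create_counter_summaries from the file above: it attaches scalar summaries to a list of (name, tensor) pairs; the two counter variables here are illustrative, not defined by the repo.

# Sketch only: TF 1.x; counters are (name, scalar tensor) pairs.
import tensorflow as tf
import train_utils

num_episodes = tf.Variable(0, dtype=tf.int64, name='num_episodes')
env_steps = tf.Variable(0, dtype=tf.int64, name='environment_steps')
train_utils.create_counter_summaries(
    [('num_episodes', num_episodes), ('environment_steps', env_steps)])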
models
models-master/research/efficient-hrl/agent.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A UVF agent. """ import tensorflow as tf import gin.tf from agents import ddpg_agent # pylint: disable=unused-import import cond_fn from utils import utils as uvf_utils from context import gin_imports # pylint: enable=unused-import slim = tf.contrib.slim @gin.configurable class UvfAgentCore(object): """Defines basic functions for UVF agent. Must be inherited with an RL agent. Used as lower-level agent. """ def __init__(self, observation_spec, action_spec, tf_env, tf_context, step_cond_fn=cond_fn.env_transition, reset_episode_cond_fn=cond_fn.env_restart, reset_env_cond_fn=cond_fn.false_fn, metrics=None, **base_agent_kwargs): """Constructs a UVF agent. Args: observation_spec: A TensorSpec defining the observations. action_spec: A BoundedTensorSpec defining the actions. tf_env: A Tensorflow environment object. tf_context: A Context class. step_cond_fn: A function indicating whether to increment the num of steps. reset_episode_cond_fn: A function indicating whether to restart the episode, resampling the context. reset_env_cond_fn: A function indicating whether to perform a manual reset of the environment. metrics: A list of functions that evaluate metrics of the agent. **base_agent_kwargs: A dictionary of parameters for base RL Agent. Raises: ValueError: If 'dqda_clipping' is < 0. """ self._step_cond_fn = step_cond_fn self._reset_episode_cond_fn = reset_episode_cond_fn self._reset_env_cond_fn = reset_env_cond_fn self.metrics = metrics # expose tf_context methods self.tf_context = tf_context(tf_env=tf_env) self.set_replay = self.tf_context.set_replay self.sample_contexts = self.tf_context.sample_contexts self.compute_rewards = self.tf_context.compute_rewards self.gamma_index = self.tf_context.gamma_index self.context_specs = self.tf_context.context_specs self.context_as_action_specs = self.tf_context.context_as_action_specs self.init_context_vars = self.tf_context.create_vars self.env_observation_spec = observation_spec[0] merged_observation_spec = (uvf_utils.merge_specs( (self.env_observation_spec,) + self.context_specs),) self._context_vars = dict() self._action_vars = dict() self.BASE_AGENT_CLASS.__init__( self, observation_spec=merged_observation_spec, action_spec=action_spec, **base_agent_kwargs ) def set_meta_agent(self, agent=None): self._meta_agent = agent @property def meta_agent(self): return self._meta_agent def actor_loss(self, states, actions, rewards, discounts, next_states): """Returns the next action for the state. Args: state: A [num_state_dims] tensor representing a state. context: A list of [num_context_dims] tensor representing a context. Returns: A [num_action_dims] tensor representing the action. """ return self.BASE_AGENT_CLASS.actor_loss(self, states) def action(self, state, context=None): """Returns the next action for the state. Args: state: A [num_state_dims] tensor representing a state. 
context: A list of [num_context_dims] tensor representing a context. Returns: A [num_action_dims] tensor representing the action. """ merged_state = self.merged_state(state, context) return self.BASE_AGENT_CLASS.action(self, merged_state) def actions(self, state, context=None): """Returns the next action for the state. Args: state: A [-1, num_state_dims] tensor representing a state. context: A list of [-1, num_context_dims] tensor representing a context. Returns: A [-1, num_action_dims] tensor representing the action. """ merged_states = self.merged_states(state, context) return self.BASE_AGENT_CLASS.actor_net(self, merged_states) def log_probs(self, states, actions, state_reprs, contexts=None): assert contexts is not None batch_dims = [tf.shape(states)[0], tf.shape(states)[1]] contexts = self.tf_context.context_multi_transition_fn( contexts, states=tf.to_float(state_reprs)) flat_states = tf.reshape(states, [batch_dims[0] * batch_dims[1], states.shape[-1]]) flat_contexts = [tf.reshape(tf.cast(context, states.dtype), [batch_dims[0] * batch_dims[1], context.shape[-1]]) for context in contexts] flat_pred_actions = self.actions(flat_states, flat_contexts) pred_actions = tf.reshape(flat_pred_actions, batch_dims + [flat_pred_actions.shape[-1]]) error = tf.square(actions - pred_actions) spec_range = (self._action_spec.maximum - self._action_spec.minimum) / 2 normalized_error = tf.cast(error, tf.float64) / tf.constant(spec_range) ** 2 return -normalized_error @gin.configurable('uvf_add_noise_fn') def add_noise_fn(self, action_fn, stddev=1.0, debug=False, clip=True, global_step=None): """Returns the action_fn with additive Gaussian noise. Args: action_fn: A callable(`state`, `context`) which returns a [num_action_dims] tensor representing a action. stddev: stddev for the Ornstein-Uhlenbeck noise. debug: Print debug messages. Returns: A [num_action_dims] action tensor. """ if global_step is not None: stddev *= tf.maximum( # Decay exploration during training. tf.train.exponential_decay(1.0, global_step, 1e6, 0.8), 0.5) def noisy_action_fn(state, context=None): """Noisy action fn.""" action = action_fn(state, context) if debug: action = uvf_utils.tf_print( action, [action], message='[add_noise_fn] pre-noise action', first_n=100) noise_dist = tf.distributions.Normal(tf.zeros_like(action), tf.ones_like(action) * stddev) noise = noise_dist.sample() action += noise if debug: action = uvf_utils.tf_print( action, [action], message='[add_noise_fn] post-noise action', first_n=100) if clip: action = uvf_utils.clip_to_spec(action, self._action_spec) return action return noisy_action_fn def merged_state(self, state, context=None): """Returns the merged state from the environment state and contexts. Args: state: A [num_state_dims] tensor representing a state. context: A list of [num_context_dims] tensor representing a context. If None, use the internal context. Returns: A [num_merged_state_dims] tensor representing the merged state. """ if context is None: context = list(self.context_vars) state = tf.concat([state,] + context, axis=-1) self._validate_states(self._batch_state(state)) return state def merged_states(self, states, contexts=None): """Returns the batch merged state from the batch env state and contexts. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. contexts: A list of [batch_size, num_context_dims] tensor representing a batch of contexts. If None, use the internal context. 
Returns: A [batch_size, num_merged_state_dims] tensor representing the batch of merged states. """ if contexts is None: contexts = [tf.tile(tf.expand_dims(context, axis=0), (tf.shape(states)[0], 1)) for context in self.context_vars] states = tf.concat([states,] + contexts, axis=-1) self._validate_states(states) return states def unmerged_states(self, merged_states): """Returns the batch state and contexts from the batch merged state. Args: merged_states: A [batch_size, num_merged_state_dims] tensor representing a batch of merged states. Returns: A [batch_size, num_state_dims] tensor and a list of [batch_size, num_context_dims] tensors representing the batch state and contexts respectively. """ self._validate_states(merged_states) num_state_dims = self.env_observation_spec.shape.as_list()[0] num_context_dims_list = [c.shape.as_list()[0] for c in self.context_specs] states = merged_states[:, :num_state_dims] contexts = [] i = num_state_dims for num_context_dims in num_context_dims_list: contexts.append(merged_states[:, i: i+num_context_dims]) i += num_context_dims return states, contexts def sample_random_actions(self, batch_size=1): """Return random actions. Args: batch_size: Batch size. Returns: A [batch_size, num_action_dims] tensor representing the batch of actions. """ actions = tf.concat( [ tf.random_uniform( shape=(batch_size, 1), minval=self._action_spec.minimum[i], maxval=self._action_spec.maximum[i]) for i in range(self._action_spec.shape[0].value) ], axis=1) return actions def clip_actions(self, actions): """Clip actions to spec. Args: actions: A [batch_size, num_action_dims] tensor representing the batch of actions. Returns: A [batch_size, num_action_dims] tensor representing the batch of clipped actions. """ actions = tf.concat( [ tf.clip_by_value( actions[:, i:i+1], self._action_spec.minimum[i], self._action_spec.maximum[i]) for i in range(self._action_spec.shape[0].value) ], axis=1) return actions def mix_contexts(self, contexts, insert_contexts, indices): """Mix two contexts based on indices. Args: contexts: A list of [batch_size, num_context_dims] tensor representing the batch of contexts. insert_contexts: A list of [batch_size, num_context_dims] tensor representing the batch of contexts to be inserted. indices: A list of a list of integers denoting indices to replace. Returns: A list of resulting contexts. """ if indices is None: indices = [[]] * len(contexts) assert len(contexts) == len(indices) assert all([spec.shape.ndims == 1 for spec in self.context_specs]) mix_contexts = [] for contexts_, insert_contexts_, indices_, spec in zip( contexts, insert_contexts, indices, self.context_specs): mix_contexts.append( tf.concat( [ insert_contexts_[:, i:i + 1] if i in indices_ else contexts_[:, i:i + 1] for i in range(spec.shape.as_list()[0]) ], axis=1)) return mix_contexts def begin_episode_ops(self, mode, action_fn=None, state=None): """Returns ops that reset agent at beginning of episodes. Args: mode: a string representing the mode=[train, explore, eval]. Returns: A list of ops. """ all_ops = [] for _, action_var in sorted(self._action_vars.items()): sample_action = self.sample_random_actions(1)[0] all_ops.append(tf.assign(action_var, sample_action)) all_ops += self.tf_context.reset(mode=mode, agent=self._meta_agent, action_fn=action_fn, state=state) return all_ops def cond_begin_episode_op(self, cond, input_vars, mode, meta_action_fn): """Returns op that resets agent at beginning of episodes. A new episode is begun if the cond op evalues to `False`. 
Args: cond: a Boolean tensor variable. input_vars: A list of tensor variables. mode: a string representing the mode=[train, explore, eval]. Returns: Conditional begin op. """ (state, action, reward, next_state, state_repr, next_state_repr) = input_vars def continue_fn(): """Continue op fn.""" items = [state, action, reward, next_state, state_repr, next_state_repr] + list(self.context_vars) batch_items = [tf.expand_dims(item, 0) for item in items] (states, actions, rewards, next_states, state_reprs, next_state_reprs) = batch_items[:6] context_reward = self.compute_rewards( mode, state_reprs, actions, rewards, next_state_reprs, batch_items[6:])[0][0] context_reward = tf.cast(context_reward, dtype=reward.dtype) if self.meta_agent is not None: meta_action = tf.concat(self.context_vars, -1) items = [state, meta_action, reward, next_state, state_repr, next_state_repr] + list(self.meta_agent.context_vars) batch_items = [tf.expand_dims(item, 0) for item in items] (states, meta_actions, rewards, next_states, state_reprs, next_state_reprs) = batch_items[:6] meta_reward = self.meta_agent.compute_rewards( mode, states, meta_actions, rewards, next_states, batch_items[6:])[0][0] meta_reward = tf.cast(meta_reward, dtype=reward.dtype) else: meta_reward = tf.constant(0, dtype=reward.dtype) with tf.control_dependencies([context_reward, meta_reward]): step_ops = self.tf_context.step(mode=mode, agent=self._meta_agent, state=state, next_state=next_state, state_repr=state_repr, next_state_repr=next_state_repr, action_fn=meta_action_fn) with tf.control_dependencies(step_ops): context_reward, meta_reward = map(tf.identity, [context_reward, meta_reward]) return context_reward, meta_reward def begin_episode_fn(): """Begin op fn.""" begin_ops = self.begin_episode_ops(mode=mode, action_fn=meta_action_fn, state=state) with tf.control_dependencies(begin_ops): return tf.zeros_like(reward), tf.zeros_like(reward) with tf.control_dependencies(input_vars): cond_begin_episode_op = tf.cond(cond, continue_fn, begin_episode_fn) return cond_begin_episode_op def get_env_base_wrapper(self, env_base, **begin_kwargs): """Create a wrapper around env_base, with agent-specific begin/end_episode. Args: env_base: A python environment base. **begin_kwargs: Keyword args for begin_episode_ops. Returns: An object with begin_episode() and end_episode(). """ begin_ops = self.begin_episode_ops(**begin_kwargs) return uvf_utils.get_contextual_env_base(env_base, begin_ops) def init_action_vars(self, name, i=None): """Create and return a tensorflow Variable holding an action. Args: name: Name of the variables. i: Integer id. Returns: A [num_action_dims] tensor. """ if i is not None: name += '_%d' % i assert name not in self._action_vars, ('Conflict! %s is already ' 'initialized.') % name self._action_vars[name] = tf.Variable( self.sample_random_actions(1)[0], name='%s_action' % (name)) self._validate_actions(tf.expand_dims(self._action_vars[name], 0)) return self._action_vars[name] @gin.configurable('uvf_critic_function') def critic_function(self, critic_vals, states, critic_fn=None): """Computes q values based on outputs from the critic net. Args: critic_vals: A tf.float32 [batch_size, ...] tensor representing outputs from the critic net. states: A [batch_size, num_state_dims] tensor representing a batch of states. critic_fn: A callable that process outputs from critic_net and outputs a [batch_size] tensor representing q values. Returns: A tf.float32 [batch_size] tensor representing q values. 
""" if critic_fn is not None: env_states, contexts = self.unmerged_states(states) critic_vals = critic_fn(critic_vals, env_states, contexts) critic_vals.shape.assert_has_rank(1) return critic_vals def get_action_vars(self, key): return self._action_vars[key] def get_context_vars(self, key): return self.tf_context.context_vars[key] def step_cond_fn(self, *args): return self._step_cond_fn(self, *args) def reset_episode_cond_fn(self, *args): return self._reset_episode_cond_fn(self, *args) def reset_env_cond_fn(self, *args): return self._reset_env_cond_fn(self, *args) @property def context_vars(self): return self.tf_context.vars @gin.configurable class MetaAgentCore(UvfAgentCore): """Defines basic functions for UVF Meta-agent. Must be inherited with an RL agent. Used as higher-level agent. """ def __init__(self, observation_spec, action_spec, tf_env, tf_context, sub_context, step_cond_fn=cond_fn.env_transition, reset_episode_cond_fn=cond_fn.env_restart, reset_env_cond_fn=cond_fn.false_fn, metrics=None, actions_reg=0., k=2, **base_agent_kwargs): """Constructs a Meta agent. Args: observation_spec: A TensorSpec defining the observations. action_spec: A BoundedTensorSpec defining the actions. tf_env: A Tensorflow environment object. tf_context: A Context class. step_cond_fn: A function indicating whether to increment the num of steps. reset_episode_cond_fn: A function indicating whether to restart the episode, resampling the context. reset_env_cond_fn: A function indicating whether to perform a manual reset of the environment. metrics: A list of functions that evaluate metrics of the agent. **base_agent_kwargs: A dictionary of parameters for base RL Agent. Raises: ValueError: If 'dqda_clipping' is < 0. """ self._step_cond_fn = step_cond_fn self._reset_episode_cond_fn = reset_episode_cond_fn self._reset_env_cond_fn = reset_env_cond_fn self.metrics = metrics self._actions_reg = actions_reg self._k = k # expose tf_context methods self.tf_context = tf_context(tf_env=tf_env) self.sub_context = sub_context(tf_env=tf_env) self.set_replay = self.tf_context.set_replay self.sample_contexts = self.tf_context.sample_contexts self.compute_rewards = self.tf_context.compute_rewards self.gamma_index = self.tf_context.gamma_index self.context_specs = self.tf_context.context_specs self.context_as_action_specs = self.tf_context.context_as_action_specs self.sub_context_as_action_specs = self.sub_context.context_as_action_specs self.init_context_vars = self.tf_context.create_vars self.env_observation_spec = observation_spec[0] merged_observation_spec = (uvf_utils.merge_specs( (self.env_observation_spec,) + self.context_specs),) self._context_vars = dict() self._action_vars = dict() assert len(self.context_as_action_specs) == 1 self.BASE_AGENT_CLASS.__init__( self, observation_spec=merged_observation_spec, action_spec=self.sub_context_as_action_specs, **base_agent_kwargs ) @gin.configurable('meta_add_noise_fn') def add_noise_fn(self, action_fn, stddev=1.0, debug=False, global_step=None): noisy_action_fn = super(MetaAgentCore, self).add_noise_fn( action_fn, stddev, clip=True, global_step=global_step) return noisy_action_fn def actor_loss(self, states, actions, rewards, discounts, next_states): """Returns the next action for the state. Args: state: A [num_state_dims] tensor representing a state. context: A list of [num_context_dims] tensor representing a context. Returns: A [num_action_dims] tensor representing the action. 
""" actions = self.actor_net(states, stop_gradients=False) regularizer = self._actions_reg * tf.reduce_mean( tf.reduce_sum(tf.abs(actions[:, self._k:]), -1), 0) loss = self.BASE_AGENT_CLASS.actor_loss(self, states) return regularizer + loss @gin.configurable class UvfAgent(UvfAgentCore, ddpg_agent.TD3Agent): """A DDPG agent with UVF. """ BASE_AGENT_CLASS = ddpg_agent.TD3Agent ACTION_TYPE = 'continuous' def __init__(self, *args, **kwargs): UvfAgentCore.__init__(self, *args, **kwargs) @gin.configurable class MetaAgent(MetaAgentCore, ddpg_agent.TD3Agent): """A DDPG meta-agent. """ BASE_AGENT_CLASS = ddpg_agent.TD3Agent ACTION_TYPE = 'continuous' def __init__(self, *args, **kwargs): MetaAgentCore.__init__(self, *args, **kwargs) @gin.configurable() def state_preprocess_net( states, num_output_dims=2, states_hidden_layers=(100,), normalizer_fn=None, activation_fn=tf.nn.relu, zero_time=True, images=False): """Creates a simple feed forward net for embedding states. """ with slim.arg_scope( [slim.fully_connected], activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=slim.variance_scaling_initializer( factor=1.0/3.0, mode='FAN_IN', uniform=True)): states_shape = tf.shape(states) states_dtype = states.dtype states = tf.to_float(states) if images: # Zero-out x-y states *= tf.constant([0.] * 2 + [1.] * (states.shape[-1] - 2), dtype=states.dtype) if zero_time: states *= tf.constant([1.] * (states.shape[-1] - 1) + [0.], dtype=states.dtype) orig_states = states embed = states if states_hidden_layers: embed = slim.stack(embed, slim.fully_connected, states_hidden_layers, scope='states') with slim.arg_scope([slim.fully_connected], weights_regularizer=None, weights_initializer=tf.random_uniform_initializer( minval=-0.003, maxval=0.003)): embed = slim.fully_connected(embed, num_output_dims, activation_fn=None, normalizer_fn=None, scope='value') output = embed output = tf.cast(output, states_dtype) return output @gin.configurable() def action_embed_net( actions, states=None, num_output_dims=2, hidden_layers=(400, 300), normalizer_fn=None, activation_fn=tf.nn.relu, zero_time=True, images=False): """Creates a simple feed forward net for embedding actions. """ with slim.arg_scope( [slim.fully_connected], activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=slim.variance_scaling_initializer( factor=1.0/3.0, mode='FAN_IN', uniform=True)): actions = tf.to_float(actions) if states is not None: if images: # Zero-out x-y states *= tf.constant([0.] * 2 + [1.] * (states.shape[-1] - 2), dtype=states.dtype) if zero_time: states *= tf.constant([1.] * (states.shape[-1] - 1) + [0.], dtype=states.dtype) actions = tf.concat([actions, tf.to_float(states)], -1) embed = actions if hidden_layers: embed = slim.stack(embed, slim.fully_connected, hidden_layers, scope='hidden') with slim.arg_scope([slim.fully_connected], weights_regularizer=None, weights_initializer=tf.random_uniform_initializer( minval=-0.003, maxval=0.003)): embed = slim.fully_connected(embed, num_output_dims, activation_fn=None, normalizer_fn=None, scope='value') if num_output_dims == 1: return embed[:, 0, ...] 
else: return embed def huber(x, kappa=0.1): return (0.5 * tf.square(x) * tf.to_float(tf.abs(x) <= kappa) + kappa * (tf.abs(x) - 0.5 * kappa) * tf.to_float(tf.abs(x) > kappa) ) / kappa @gin.configurable() class StatePreprocess(object): STATE_PREPROCESS_NET_SCOPE = 'state_process_net' ACTION_EMBED_NET_SCOPE = 'action_embed_net' def __init__(self, trainable=False, state_preprocess_net=lambda states: states, action_embed_net=lambda actions, *args, **kwargs: actions, ndims=None): self.trainable = trainable self._scope = tf.get_variable_scope().name self._ndims = ndims self._state_preprocess_net = tf.make_template( self.STATE_PREPROCESS_NET_SCOPE, state_preprocess_net, create_scope_now_=True) self._action_embed_net = tf.make_template( self.ACTION_EMBED_NET_SCOPE, action_embed_net, create_scope_now_=True) def __call__(self, states): batched = states.get_shape().ndims != 1 if not batched: states = tf.expand_dims(states, 0) embedded = self._state_preprocess_net(states) if self._ndims is not None: embedded = embedded[..., :self._ndims] if not batched: return embedded[0] return embedded def loss(self, states, next_states, low_actions, low_states): batch_size = tf.shape(states)[0] d = int(low_states.shape[1]) # Sample indices into meta-transition to train on. probs = 0.99 ** tf.range(d, dtype=tf.float32) probs *= tf.constant([1.0] * (d - 1) + [1.0 / (1 - 0.99)], dtype=tf.float32) probs /= tf.reduce_sum(probs) index_dist = tf.distributions.Categorical(probs=probs, dtype=tf.int64) indices = index_dist.sample(batch_size) batch_size = tf.cast(batch_size, tf.int64) next_indices = tf.concat( [tf.range(batch_size, dtype=tf.int64)[:, None], (1 + indices[:, None]) % d], -1) new_next_states = tf.where(indices < d - 1, tf.gather_nd(low_states, next_indices), next_states) next_states = new_next_states embed1 = tf.to_float(self._state_preprocess_net(states)) embed2 = tf.to_float(self._state_preprocess_net(next_states)) action_embed = self._action_embed_net( tf.layers.flatten(low_actions), states=states) tau = 2.0 fn = lambda z: tau * tf.reduce_sum(huber(z), -1) all_embed = tf.get_variable('all_embed', [1024, int(embed1.shape[-1])], initializer=tf.zeros_initializer()) upd = all_embed.assign(tf.concat([all_embed[batch_size:], embed2], 0)) with tf.control_dependencies([upd]): close = 1 * tf.reduce_mean(fn(embed1 + action_embed - embed2)) prior_log_probs = tf.reduce_logsumexp( -fn((embed1 + action_embed)[:, None, :] - all_embed[None, :, :]), axis=-1) - tf.log(tf.to_float(all_embed.shape[0])) far = tf.reduce_mean(tf.exp(-fn((embed1 + action_embed)[1:] - embed2[:-1]) - tf.stop_gradient(prior_log_probs[1:]))) repr_log_probs = tf.stop_gradient( -fn(embed1 + action_embed - embed2) - prior_log_probs) / tau return close + far, repr_log_probs, indices def get_trainable_vars(self): return ( slim.get_trainable_variables( uvf_utils.join_scope(self._scope, self.STATE_PREPROCESS_NET_SCOPE)) + slim.get_trainable_variables( uvf_utils.join_scope(self._scope, self.ACTION_EMBED_NET_SCOPE))) @gin.configurable() class InverseDynamics(object): INVERSE_DYNAMICS_NET_SCOPE = 'inverse_dynamics' def __init__(self, spec): self._spec = spec def sample(self, states, next_states, num_samples, orig_goals, sc=0.5): goal_dim = orig_goals.shape[-1] spec_range = (self._spec.maximum - self._spec.minimum) / 2 * tf.ones([goal_dim]) loc = tf.cast(next_states - states, tf.float32)[:, :goal_dim] scale = sc * tf.tile(tf.reshape(spec_range, [1, goal_dim]), [tf.shape(states)[0], 1]) dist = tf.distributions.Normal(loc, scale) if num_samples == 1: return 
dist.sample() samples = tf.concat([dist.sample(num_samples - 2), tf.expand_dims(loc, 0), tf.expand_dims(orig_goals, 0)], 0) return uvf_utils.clip_to_spec(samples, self._spec)
29,026
36.454194
91
py
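The `huber` helper defined in the file above is the kappa-scaled Huber penalty used inside `StatePreprocess.loss`. A minimal NumPy sketch of the same piecewise form, handy for sanity-checking values outside the TF graph (the test inputs are made up):

import numpy as np

def huber_np(x, kappa=0.1):
  # Quadratic inside [-kappa, kappa], linear outside, rescaled by 1/kappa,
  # mirroring the tf version defined in the agent code above.
  x = np.asarray(x, dtype=np.float64)
  quad = 0.5 * np.square(x) * (np.abs(x) <= kappa)
  lin = kappa * (np.abs(x) - 0.5 * kappa) * (np.abs(x) > kappa)
  return (quad + lin) / kappa

if __name__ == '__main__':
  # Small errors behave ~quadratically, large errors ~|x|.
  print(huber_np([0.05, -0.05, 0.5, -2.0]))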
models
models-master/research/efficient-hrl/eval.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Script for evaluating a UVF agent. To run locally: See run_eval.py To run on borg: See train_eval.borg """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow as tf slim = tf.contrib.slim import gin.tf # pylint: disable=unused-import import agent import train from utils import utils as uvf_utils from utils import eval_utils from environments import create_maze_env # pylint: enable=unused-import flags = tf.app.flags flags.DEFINE_string('eval_dir', None, 'Directory for writing logs/summaries during eval.') flags.DEFINE_string('checkpoint_dir', None, 'Directory containing checkpoints to eval.') FLAGS = flags.FLAGS def get_evaluate_checkpoint_fn(master, output_dir, eval_step_fns, model_rollout_fn, gamma, max_steps_per_episode, num_episodes_eval, num_episodes_videos, tuner_hook, generate_videos, generate_summaries, video_settings): """Returns a function that evaluates a given checkpoint. Args: master: BNS name of the TensorFlow master output_dir: The output directory to which the metric summaries are written. eval_step_fns: A dictionary of a functions that return a list of [state, action, reward, discount, transition_type] tensors, indexed by summary tag name. model_rollout_fn: Model rollout fn. gamma: Discount factor for the reward. max_steps_per_episode: Maximum steps to run each episode for. num_episodes_eval: Number of episodes to evaluate and average reward over. num_episodes_videos: Number of episodes to record for video. tuner_hook: A callable(average reward, global step) that updates a Vizier tuner trial. generate_videos: Whether to generate videos of the agent in action. generate_summaries: Whether to generate summaries. video_settings: Settings for generating videos of the agent. Returns: A function that evaluates a checkpoint. """ sess = tf.Session(master, graph=tf.get_default_graph()) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) summary_writer = tf.summary.FileWriter(output_dir) def evaluate_checkpoint(checkpoint_path): """Performs a one-time evaluation of the given checkpoint. Args: checkpoint_path: Checkpoint to evaluate. 
Returns: True if the evaluation process should stop """ restore_fn = tf.contrib.framework.assign_from_checkpoint_fn( checkpoint_path, uvf_utils.get_all_vars(), ignore_missing_vars=True, reshape_variables=False) assert restore_fn is not None, 'cannot restore %s' % checkpoint_path restore_fn(sess) global_step = sess.run(slim.get_global_step()) should_stop = False max_reward = -1e10 max_meta_reward = -1e10 for eval_tag, (eval_step, env_base,) in sorted(eval_step_fns.items()): if hasattr(env_base, 'set_sess'): env_base.set_sess(sess) # set session if generate_summaries: tf.logging.info( '[%s] Computing average reward over %d episodes at global step %d.', eval_tag, num_episodes_eval, global_step) (average_reward, last_reward, average_meta_reward, last_meta_reward, average_success, states, actions) = eval_utils.compute_average_reward( sess, env_base, eval_step, gamma, max_steps_per_episode, num_episodes_eval) tf.logging.info('[%s] Average reward = %f', eval_tag, average_reward) tf.logging.info('[%s] Last reward = %f', eval_tag, last_reward) tf.logging.info('[%s] Average meta reward = %f', eval_tag, average_meta_reward) tf.logging.info('[%s] Last meta reward = %f', eval_tag, last_meta_reward) tf.logging.info('[%s] Average success = %f', eval_tag, average_success) if model_rollout_fn is not None: preds, model_losses = eval_utils.compute_model_loss( sess, model_rollout_fn, states, actions) for i, (pred, state, model_loss) in enumerate( zip(preds, states, model_losses)): tf.logging.info('[%s] Model rollout step %d: loss=%f', eval_tag, i, model_loss) tf.logging.info('[%s] Model rollout step %d: pred=%s', eval_tag, i, str(pred.tolist())) tf.logging.info('[%s] Model rollout step %d: state=%s', eval_tag, i, str(state.tolist())) # Report the eval stats to the tuner. if average_reward > max_reward: max_reward = average_reward if average_meta_reward > max_meta_reward: max_meta_reward = average_meta_reward for (tag, value) in [('Reward/average_%s_reward', average_reward), ('Reward/last_%s_reward', last_reward), ('Reward/average_%s_meta_reward', average_meta_reward), ('Reward/last_%s_meta_reward', last_meta_reward), ('Reward/average_%s_success', average_success)]: summary_str = tf.Summary(value=[ tf.Summary.Value( tag=tag % eval_tag, simple_value=value) ]) summary_writer.add_summary(summary_str, global_step) summary_writer.flush() if generate_videos or should_stop: # Do a manual reset before generating the video to see the initial # pose of the robot, towards which the reset controller is moving. 
if hasattr(env_base, '_gym_env'): tf.logging.info('Resetting before recording video') if hasattr(env_base._gym_env, 'reset_model'): env_base._gym_env.reset_model() # pylint: disable=protected-access else: env_base._gym_env.wrapped_env.reset_model() video_filename = os.path.join(output_dir, 'videos', '%s_step_%d.mp4' % (eval_tag, global_step)) eval_utils.capture_video(sess, eval_step, env_base, max_steps_per_episode * num_episodes_videos, video_filename, video_settings, reset_every=max_steps_per_episode) should_stop = should_stop or (generate_summaries and tuner_hook and tuner_hook(max_reward, global_step)) return bool(should_stop) return evaluate_checkpoint def get_model_rollout(uvf_agent, tf_env): """Model rollout function.""" state_spec = tf_env.observation_spec()[0] action_spec = tf_env.action_spec()[0] state_ph = tf.placeholder(dtype=state_spec.dtype, shape=state_spec.shape) action_ph = tf.placeholder(dtype=action_spec.dtype, shape=action_spec.shape) merged_state = uvf_agent.merged_state(state_ph) diff_value = uvf_agent.critic_net(tf.expand_dims(merged_state, 0), tf.expand_dims(action_ph, 0))[0] diff_value = tf.cast(diff_value, dtype=state_ph.dtype) state_ph.shape.assert_is_compatible_with(diff_value.shape) next_state = state_ph + diff_value def model_rollout_fn(sess, state, action): return sess.run(next_state, feed_dict={state_ph: state, action_ph: action}) return model_rollout_fn def get_eval_step(uvf_agent, state_preprocess, tf_env, action_fn, meta_action_fn, environment_steps, num_episodes, mode='eval'): """Get one-step policy/env stepping ops. Args: uvf_agent: A UVF agent. tf_env: A TFEnvironment. action_fn: A function to produce actions given current state. meta_action_fn: A function to produce meta actions given current state. environment_steps: A variable to count the number of steps in the tf_env. num_episodes: A variable to count the number of episodes. mode: a string representing the mode=[train, explore, eval]. 
Returns: A collect_experience_op that excute an action and store into the replay_buffer """ tf_env.start_collect() state = tf_env.current_obs() action = action_fn(state, context=None) state_repr = state_preprocess(state) action_spec = tf_env.action_spec() action_ph = tf.placeholder(dtype=action_spec.dtype, shape=action_spec.shape) with tf.control_dependencies([state]): transition_type, reward, discount = tf_env.step(action_ph) def increment_step(): return environment_steps.assign_add(1) def increment_episode(): return num_episodes.assign_add(1) def no_op_int(): return tf.constant(0, dtype=tf.int64) step_cond = uvf_agent.step_cond_fn(state, action, transition_type, environment_steps, num_episodes) reset_episode_cond = uvf_agent.reset_episode_cond_fn( state, action, transition_type, environment_steps, num_episodes) reset_env_cond = uvf_agent.reset_env_cond_fn(state, action, transition_type, environment_steps, num_episodes) increment_step_op = tf.cond(step_cond, increment_step, no_op_int) with tf.control_dependencies([increment_step_op]): increment_episode_op = tf.cond(reset_episode_cond, increment_episode, no_op_int) with tf.control_dependencies([reward, discount]): next_state = tf_env.current_obs() next_state_repr = state_preprocess(next_state) with tf.control_dependencies([increment_episode_op]): post_reward, post_meta_reward = uvf_agent.cond_begin_episode_op( tf.logical_not(reset_episode_cond), [state, action_ph, reward, next_state, state_repr, next_state_repr], mode=mode, meta_action_fn=meta_action_fn) # Important: do manual reset after getting the final reward from the # unreset environment. with tf.control_dependencies([post_reward, post_meta_reward]): cond_reset_op = tf.cond(reset_env_cond, tf_env.reset, tf_env.current_time_step) # Add a dummy control dependency to force the reset_op to run with tf.control_dependencies(cond_reset_op): post_reward, post_meta_reward = map(tf.identity, [post_reward, post_meta_reward]) eval_step = [next_state, action_ph, transition_type, post_reward, post_meta_reward, discount, uvf_agent.context_vars, state_repr] if callable(action): def step_fn(sess): action_value = action(sess) return sess.run(eval_step, feed_dict={action_ph: action_value}) else: action = uvf_utils.clip_to_spec(action, action_spec) def step_fn(sess): action_value = sess.run(action) return sess.run(eval_step, feed_dict={action_ph: action_value}) return step_fn @gin.configurable def evaluate(checkpoint_dir, eval_dir, environment=None, num_bin_actions=3, agent_class=None, meta_agent_class=None, state_preprocess_class=None, gamma=1.0, num_episodes_eval=10, eval_interval_secs=60, max_number_of_evaluations=None, checkpoint_timeout=None, timeout_fn=None, tuner_hook=None, generate_videos=False, generate_summaries=True, num_episodes_videos=5, video_settings=None, eval_modes=('eval',), eval_model_rollout=False, policy_save_dir='policy', checkpoint_range=None, checkpoint_path=None, max_steps_per_episode=None, evaluate_nohrl=False): """Loads and repeatedly evaluates a checkpointed model at a set interval. Args: checkpoint_dir: The directory where the checkpoints reside. eval_dir: Directory to save the evaluation summary results. environment: A BaseEnvironment to evaluate. num_bin_actions: Number of bins for discretizing continuous actions. agent_class: An RL agent class. meta_agent_class: A Meta agent class. gamma: Discount factor for the reward. num_episodes_eval: Number of episodes to evaluate and average reward over. eval_interval_secs: The number of seconds between each evaluation run. 
max_number_of_evaluations: The max number of evaluations. If None the evaluation continues indefinitely. checkpoint_timeout: The maximum amount of time to wait between checkpoints. If left as `None`, then the process will wait indefinitely. timeout_fn: Optional function to call after a timeout. tuner_hook: A callable that takes the average reward and global step and updates a Vizier tuner trial. generate_videos: Whether to generate videos of the agent in action. generate_summaries: Whether to generate summaries. num_episodes_videos: Number of episodes to evaluate for generating videos. video_settings: Settings for generating videos of the agent. optimal action based on the critic. eval_modes: A tuple of eval modes. eval_model_rollout: Evaluate model rollout. policy_save_dir: Optional sub-directory where the policies are saved. checkpoint_range: Optional. If provided, evaluate all checkpoints in the range. checkpoint_path: Optional sub-directory specifying which checkpoint to evaluate. If None, will evaluate the most recent checkpoint. """ tf_env = create_maze_env.TFPyEnvironment(environment) observation_spec = [tf_env.observation_spec()] action_spec = [tf_env.action_spec()] assert max_steps_per_episode, 'max_steps_per_episode need to be set' if agent_class.ACTION_TYPE == 'discrete': assert False else: assert agent_class.ACTION_TYPE == 'continuous' if meta_agent_class is not None: assert agent_class.ACTION_TYPE == meta_agent_class.ACTION_TYPE with tf.variable_scope('meta_agent'): meta_agent = meta_agent_class( observation_spec, action_spec, tf_env, ) else: meta_agent = None with tf.variable_scope('uvf_agent'): uvf_agent = agent_class( observation_spec, action_spec, tf_env, ) uvf_agent.set_meta_agent(agent=meta_agent) with tf.variable_scope('state_preprocess'): state_preprocess = state_preprocess_class() # run both actor and critic once to ensure networks are initialized # and gin configs will be saved # pylint: disable=protected-access temp_states = tf.expand_dims( tf.zeros( dtype=uvf_agent._observation_spec.dtype, shape=uvf_agent._observation_spec.shape), 0) # pylint: enable=protected-access temp_actions = uvf_agent.actor_net(temp_states) uvf_agent.critic_net(temp_states, temp_actions) # create eval_step_fns for each action function eval_step_fns = dict() meta_agent = uvf_agent.meta_agent for meta in [True] + [False] * evaluate_nohrl: meta_tag = 'hrl' if meta else 'nohrl' uvf_agent.set_meta_agent(meta_agent if meta else None) for mode in eval_modes: # wrap environment wrapped_environment = uvf_agent.get_env_base_wrapper( environment, mode=mode) action_wrapper = lambda agent_: agent_.action action_fn = action_wrapper(uvf_agent) meta_action_fn = action_wrapper(meta_agent) eval_step_fns['%s_%s' % (mode, meta_tag)] = (get_eval_step( uvf_agent=uvf_agent, state_preprocess=state_preprocess, tf_env=tf_env, action_fn=action_fn, meta_action_fn=meta_action_fn, environment_steps=tf.Variable( 0, dtype=tf.int64, name='environment_steps'), num_episodes=tf.Variable(0, dtype=tf.int64, name='num_episodes'), mode=mode), wrapped_environment,) model_rollout_fn = None if eval_model_rollout: model_rollout_fn = get_model_rollout(uvf_agent, tf_env) tf.train.get_or_create_global_step() if policy_save_dir: checkpoint_dir = os.path.join(checkpoint_dir, policy_save_dir) tf.logging.info('Evaluating policies at %s', checkpoint_dir) tf.logging.info('Running episodes for max %d steps', max_steps_per_episode) evaluate_checkpoint_fn = get_evaluate_checkpoint_fn( '', eval_dir, eval_step_fns, model_rollout_fn, gamma, 
max_steps_per_episode, num_episodes_eval, num_episodes_videos, tuner_hook, generate_videos, generate_summaries, video_settings) if checkpoint_path is not None: checkpoint_path = os.path.join(checkpoint_dir, checkpoint_path) evaluate_checkpoint_fn(checkpoint_path) elif checkpoint_range is not None: model_files = tf.gfile.Glob( os.path.join(checkpoint_dir, 'model.ckpt-*.index')) tf.logging.info('Found %s policies at %s', len(model_files), checkpoint_dir) model_files = { int(f.split('model.ckpt-', 1)[1].split('.', 1)[0]): os.path.splitext(f)[0] for f in model_files } model_files = { k: v for k, v in model_files.items() if k >= checkpoint_range[0] and k <= checkpoint_range[1] } tf.logging.info('Evaluating %d policies at %s', len(model_files), checkpoint_dir) for _, checkpoint_path in sorted(model_files.items()): evaluate_checkpoint_fn(checkpoint_path) else: eval_utils.evaluate_checkpoint_repeatedly( checkpoint_dir, evaluate_checkpoint_fn, eval_interval_secs=eval_interval_secs, max_number_of_evaluations=max_number_of_evaluations, checkpoint_timeout=checkpoint_timeout, timeout_fn=timeout_fn)
18,538
39.214751
131
py
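When `checkpoint_range` is set, `evaluate` in eval.py turns `model.ckpt-*.index` filenames into a step-to-path map before filtering. A standalone sketch of that parsing; the `/tmp/train/policy/...` filenames are hypothetical:

import os

# Hypothetical checkpoint index files, in the layout eval.py globs for.
model_files = [
    '/tmp/train/policy/model.ckpt-1000.index',
    '/tmp/train/policy/model.ckpt-2000.index',
    '/tmp/train/policy/model.ckpt-3000.index',
]
checkpoint_range = (1500, 3000)

# Same parsing as in evaluate(): extract the global step, drop '.index'.
step_to_path = {
    int(f.split('model.ckpt-', 1)[1].split('.', 1)[0]): os.path.splitext(f)[0]
    for f in model_files
}
step_to_path = {
    k: v for k, v in step_to_path.items()
    if checkpoint_range[0] <= k <= checkpoint_range[1]
}
for step, path in sorted(step_to_path.items()):
  print(step, path)  # keeps only the 2000 and 3000 checkpoints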
models
models-master/research/efficient-hrl/train.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Script for training an RL agent using the UVF algorithm. To run locally: See run_train.py """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import tensorflow as tf slim = tf.contrib.slim import gin.tf # pylint: disable=unused-import import train_utils import agent as agent_ from agents import circular_buffer from utils import utils as uvf_utils from environments import create_maze_env # pylint: enable=unused-import flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('goal_sample_strategy', 'sample', 'None, sample, FuN') LOAD_PATH = None def collect_experience(tf_env, agent, meta_agent, state_preprocess, replay_buffer, meta_replay_buffer, action_fn, meta_action_fn, environment_steps, num_episodes, num_resets, episode_rewards, episode_meta_rewards, store_context, disable_agent_reset): """Collect experience in a tf_env into a replay_buffer using action_fn. Args: tf_env: A TFEnvironment. agent: A UVF agent. meta_agent: A Meta Agent. replay_buffer: A Replay buffer to collect experience in. meta_replay_buffer: A Replay buffer to collect meta agent experience in. action_fn: A function to produce actions given current state. meta_action_fn: A function to produce meta actions given current state. environment_steps: A variable to count the number of steps in the tf_env. num_episodes: A variable to count the number of episodes. num_resets: A variable to count the number of resets. store_context: A boolean to check if store context in replay. disable_agent_reset: A boolean that disables agent from resetting. 
Returns: A collect_experience_op that excute an action and store into the replay_buffers """ tf_env.start_collect() state = tf_env.current_obs() state_repr = state_preprocess(state) action = action_fn(state, context=None) with tf.control_dependencies([state]): transition_type, reward, discount = tf_env.step(action) def increment_step(): return environment_steps.assign_add(1) def increment_episode(): return num_episodes.assign_add(1) def increment_reset(): return num_resets.assign_add(1) def update_episode_rewards(context_reward, meta_reward, reset): new_episode_rewards = tf.concat( [episode_rewards[:1] + context_reward, episode_rewards[1:]], 0) new_episode_meta_rewards = tf.concat( [episode_meta_rewards[:1] + meta_reward, episode_meta_rewards[1:]], 0) return tf.group( episode_rewards.assign( tf.cond(reset, lambda: tf.concat([[0.], episode_rewards[:-1]], 0), lambda: new_episode_rewards)), episode_meta_rewards.assign( tf.cond(reset, lambda: tf.concat([[0.], episode_meta_rewards[:-1]], 0), lambda: new_episode_meta_rewards))) def no_op_int(): return tf.constant(0, dtype=tf.int64) step_cond = agent.step_cond_fn(state, action, transition_type, environment_steps, num_episodes) reset_episode_cond = agent.reset_episode_cond_fn( state, action, transition_type, environment_steps, num_episodes) reset_env_cond = agent.reset_env_cond_fn(state, action, transition_type, environment_steps, num_episodes) increment_step_op = tf.cond(step_cond, increment_step, no_op_int) increment_episode_op = tf.cond(reset_episode_cond, increment_episode, no_op_int) increment_reset_op = tf.cond(reset_env_cond, increment_reset, no_op_int) increment_op = tf.group(increment_step_op, increment_episode_op, increment_reset_op) with tf.control_dependencies([increment_op, reward, discount]): next_state = tf_env.current_obs() next_state_repr = state_preprocess(next_state) next_reset_episode_cond = tf.logical_or( agent.reset_episode_cond_fn( state, action, transition_type, environment_steps, num_episodes), tf.equal(discount, 0.0)) if store_context: context = [tf.identity(var) + tf.zeros_like(var) for var in agent.context_vars] meta_context = [tf.identity(var) + tf.zeros_like(var) for var in meta_agent.context_vars] else: context = [] meta_context = [] with tf.control_dependencies([next_state] + context + meta_context): if disable_agent_reset: collect_experience_ops = [tf.no_op()] # don't reset agent else: collect_experience_ops = agent.cond_begin_episode_op( tf.logical_not(reset_episode_cond), [state, action, reward, next_state, state_repr, next_state_repr], mode='explore', meta_action_fn=meta_action_fn) context_reward, meta_reward = collect_experience_ops collect_experience_ops = list(collect_experience_ops) collect_experience_ops.append( update_episode_rewards(tf.reduce_sum(context_reward), meta_reward, reset_episode_cond)) meta_action_every_n = agent.tf_context.meta_action_every_n with tf.control_dependencies(collect_experience_ops): transition = [state, action, reward, discount, next_state] meta_action = tf.to_float( tf.concat(context, -1)) # Meta agent action is low-level context meta_end = tf.logical_and( # End of meta-transition. 
tf.equal(agent.tf_context.t % meta_action_every_n, 1), agent.tf_context.t > 1) with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): states_var = tf.get_variable('states_var', [meta_action_every_n, state.shape[-1]], state.dtype) actions_var = tf.get_variable('actions_var', [meta_action_every_n, action.shape[-1]], action.dtype) state_var = tf.get_variable('state_var', state.shape, state.dtype) reward_var = tf.get_variable('reward_var', reward.shape, reward.dtype) meta_action_var = tf.get_variable('meta_action_var', meta_action.shape, meta_action.dtype) meta_context_var = [ tf.get_variable('meta_context_var%d' % idx, meta_context[idx].shape, meta_context[idx].dtype) for idx in range(len(meta_context))] actions_var_upd = tf.scatter_update( actions_var, (agent.tf_context.t - 2) % meta_action_every_n, action) with tf.control_dependencies([actions_var_upd]): actions = tf.identity(actions_var) + tf.zeros_like(actions_var) meta_reward = tf.identity(meta_reward) + tf.zeros_like(meta_reward) meta_reward = tf.reshape(meta_reward, reward.shape) reward = 0.1 * meta_reward meta_transition = [state_var, meta_action_var, reward_var + reward, discount * (1 - tf.to_float(next_reset_episode_cond)), next_state] meta_transition.extend([states_var, actions]) if store_context: # store current and next context into replay transition += context + list(agent.context_vars) meta_transition += meta_context_var + list(meta_agent.context_vars) meta_step_cond = tf.squeeze(tf.logical_and(step_cond, tf.logical_or(next_reset_episode_cond, meta_end))) collect_experience_op = tf.group( replay_buffer.maybe_add(transition, step_cond), meta_replay_buffer.maybe_add(meta_transition, meta_step_cond), ) with tf.control_dependencies([collect_experience_op]): collect_experience_op = tf.cond(reset_env_cond, tf_env.reset, tf_env.current_time_step) meta_period = tf.equal(agent.tf_context.t % meta_action_every_n, 1) states_var_upd = tf.scatter_update( states_var, (agent.tf_context.t - 1) % meta_action_every_n, next_state) state_var_upd = tf.assign( state_var, tf.cond(meta_period, lambda: next_state, lambda: state_var)) reward_var_upd = tf.assign( reward_var, tf.cond(meta_period, lambda: tf.zeros_like(reward_var), lambda: reward_var + reward)) meta_action = tf.to_float(tf.concat(agent.context_vars, -1)) meta_action_var_upd = tf.assign( meta_action_var, tf.cond(meta_period, lambda: meta_action, lambda: meta_action_var)) meta_context_var_upd = [ tf.assign( meta_context_var[idx], tf.cond(meta_period, lambda: meta_agent.context_vars[idx], lambda: meta_context_var[idx])) for idx in range(len(meta_context))] return tf.group( collect_experience_op, states_var_upd, state_var_upd, reward_var_upd, meta_action_var_upd, *meta_context_var_upd) def sample_best_meta_actions(state_reprs, next_state_reprs, prev_meta_actions, low_states, low_actions, low_state_reprs, inverse_dynamics, uvf_agent, k=10): """Return meta-actions which approximately maximize low-level log-probs.""" sampled_actions = inverse_dynamics.sample(state_reprs, next_state_reprs, k, prev_meta_actions) sampled_actions = tf.stop_gradient(sampled_actions) sampled_log_probs = tf.reshape(uvf_agent.log_probs( tf.tile(low_states, [k, 1, 1]), tf.tile(low_actions, [k, 1, 1]), tf.tile(low_state_reprs, [k, 1, 1]), [tf.reshape(sampled_actions, [-1, sampled_actions.shape[-1]])]), [k, low_states.shape[0], low_states.shape[1], -1]) fitness = tf.reduce_sum(sampled_log_probs, [2, 3]) best_actions = tf.argmax(fitness, 0) actions = tf.gather_nd( sampled_actions, tf.stack([best_actions, 
tf.range(prev_meta_actions.shape[0], dtype=tf.int64)], -1)) return actions @gin.configurable def train_uvf(train_dir, environment=None, num_bin_actions=3, agent_class=None, meta_agent_class=None, state_preprocess_class=None, inverse_dynamics_class=None, exp_action_wrapper=None, replay_buffer=None, meta_replay_buffer=None, replay_num_steps=1, meta_replay_num_steps=1, critic_optimizer=None, actor_optimizer=None, meta_critic_optimizer=None, meta_actor_optimizer=None, repr_optimizer=None, relabel_contexts=False, meta_relabel_contexts=False, batch_size=64, repeat_size=0, num_episodes_train=2000, initial_episodes=2, initial_steps=None, num_updates_per_observation=1, num_collect_per_update=1, num_collect_per_meta_update=1, gamma=1.0, meta_gamma=1.0, reward_scale_factor=1.0, target_update_period=1, should_stop_early=None, clip_gradient_norm=0.0, summarize_gradients=False, debug_summaries=False, log_every_n_steps=100, prefetch_queue_capacity=2, policy_save_dir='policy', save_policy_every_n_steps=1000, save_policy_interval_secs=0, replay_context_ratio=0.0, next_state_as_context_ratio=0.0, state_index=0, zero_timer_ratio=0.0, timer_index=-1, debug=False, max_policies_to_save=None, max_steps_per_episode=None, load_path=LOAD_PATH): """Train an agent.""" tf_env = create_maze_env.TFPyEnvironment(environment) observation_spec = [tf_env.observation_spec()] action_spec = [tf_env.action_spec()] max_steps_per_episode = max_steps_per_episode or tf_env.pyenv.max_episode_steps assert max_steps_per_episode, 'max_steps_per_episode need to be set' if initial_steps is None: initial_steps = initial_episodes * max_steps_per_episode if agent_class.ACTION_TYPE == 'discrete': assert False else: assert agent_class.ACTION_TYPE == 'continuous' assert agent_class.ACTION_TYPE == meta_agent_class.ACTION_TYPE with tf.variable_scope('meta_agent'): meta_agent = meta_agent_class( observation_spec, action_spec, tf_env, debug_summaries=debug_summaries) meta_agent.set_replay(replay=meta_replay_buffer) with tf.variable_scope('uvf_agent'): uvf_agent = agent_class( observation_spec, action_spec, tf_env, debug_summaries=debug_summaries) uvf_agent.set_meta_agent(agent=meta_agent) uvf_agent.set_replay(replay=replay_buffer) with tf.variable_scope('state_preprocess'): state_preprocess = state_preprocess_class() with tf.variable_scope('inverse_dynamics'): inverse_dynamics = inverse_dynamics_class( meta_agent.sub_context_as_action_specs[0]) # Create counter variables global_step = tf.contrib.framework.get_or_create_global_step() num_episodes = tf.Variable(0, dtype=tf.int64, name='num_episodes') num_resets = tf.Variable(0, dtype=tf.int64, name='num_resets') num_updates = tf.Variable(0, dtype=tf.int64, name='num_updates') num_meta_updates = tf.Variable(0, dtype=tf.int64, name='num_meta_updates') episode_rewards = tf.Variable([0.] * 100, name='episode_rewards') episode_meta_rewards = tf.Variable([0.] 
* 100, name='episode_meta_rewards') # Create counter variables summaries train_utils.create_counter_summaries([ ('environment_steps', global_step), ('num_episodes', num_episodes), ('num_resets', num_resets), ('num_updates', num_updates), ('num_meta_updates', num_meta_updates), ('replay_buffer_adds', replay_buffer.get_num_adds()), ('meta_replay_buffer_adds', meta_replay_buffer.get_num_adds()), ]) tf.summary.scalar('avg_episode_rewards', tf.reduce_mean(episode_rewards[1:])) tf.summary.scalar('avg_episode_meta_rewards', tf.reduce_mean(episode_meta_rewards[1:])) tf.summary.histogram('episode_rewards', episode_rewards[1:]) tf.summary.histogram('episode_meta_rewards', episode_meta_rewards[1:]) # Create init ops action_fn = uvf_agent.action action_fn = uvf_agent.add_noise_fn(action_fn, global_step=None) meta_action_fn = meta_agent.action meta_action_fn = meta_agent.add_noise_fn(meta_action_fn, global_step=None) meta_actions_fn = meta_agent.actions meta_actions_fn = meta_agent.add_noise_fn(meta_actions_fn, global_step=None) init_collect_experience_op = collect_experience( tf_env, uvf_agent, meta_agent, state_preprocess, replay_buffer, meta_replay_buffer, action_fn, meta_action_fn, environment_steps=global_step, num_episodes=num_episodes, num_resets=num_resets, episode_rewards=episode_rewards, episode_meta_rewards=episode_meta_rewards, store_context=True, disable_agent_reset=False, ) # Create train ops collect_experience_op = collect_experience( tf_env, uvf_agent, meta_agent, state_preprocess, replay_buffer, meta_replay_buffer, action_fn, meta_action_fn, environment_steps=global_step, num_episodes=num_episodes, num_resets=num_resets, episode_rewards=episode_rewards, episode_meta_rewards=episode_meta_rewards, store_context=True, disable_agent_reset=False, ) train_op_list = [] repr_train_op = tf.constant(0.0) for mode in ['meta', 'nometa']: if mode == 'meta': agent = meta_agent buff = meta_replay_buffer critic_opt = meta_critic_optimizer actor_opt = meta_actor_optimizer relabel = meta_relabel_contexts num_steps = meta_replay_num_steps my_gamma = meta_gamma, n_updates = num_meta_updates else: agent = uvf_agent buff = replay_buffer critic_opt = critic_optimizer actor_opt = actor_optimizer relabel = relabel_contexts num_steps = replay_num_steps my_gamma = gamma n_updates = num_updates with tf.name_scope(mode): batch = buff.get_random_batch(batch_size, num_steps=num_steps) states, actions, rewards, discounts, next_states = batch[:5] with tf.name_scope('Reward'): tf.summary.scalar('average_step_reward', tf.reduce_mean(rewards)) rewards *= reward_scale_factor batch_queue = slim.prefetch_queue.prefetch_queue( [states, actions, rewards, discounts, next_states] + batch[5:], capacity=prefetch_queue_capacity, name='batch_queue') batch_dequeue = batch_queue.dequeue() if repeat_size > 0: batch_dequeue = [ tf.tile(batch, (repeat_size+1,) + (1,) * (batch.shape.ndims - 1)) for batch in batch_dequeue ] batch_size *= (repeat_size + 1) states, actions, rewards, discounts, next_states = batch_dequeue[:5] if mode == 'meta': low_states = batch_dequeue[5] low_actions = batch_dequeue[6] low_state_reprs = state_preprocess(low_states) state_reprs = state_preprocess(states) next_state_reprs = state_preprocess(next_states) if mode == 'meta': # Re-label meta-action prev_actions = actions if FLAGS.goal_sample_strategy == 'None': pass elif FLAGS.goal_sample_strategy == 'FuN': actions = inverse_dynamics.sample(state_reprs, next_state_reprs, 1, prev_actions, sc=0.1) actions = tf.stop_gradient(actions) elif FLAGS.goal_sample_strategy 
== 'sample': actions = sample_best_meta_actions(state_reprs, next_state_reprs, prev_actions, low_states, low_actions, low_state_reprs, inverse_dynamics, uvf_agent, k=10) else: assert False if state_preprocess.trainable and mode == 'meta': # Representation learning is based on meta-transitions, but is trained # along with low-level policy updates. repr_loss, _, _ = state_preprocess.loss(states, next_states, low_actions, low_states) repr_train_op = slim.learning.create_train_op( repr_loss, repr_optimizer, global_step=None, update_ops=None, summarize_gradients=summarize_gradients, clip_gradient_norm=clip_gradient_norm, variables_to_train=state_preprocess.get_trainable_vars(),) # Get contexts for training contexts, next_contexts = agent.sample_contexts( mode='train', batch_size=batch_size, state=states, next_state=next_states, ) if not relabel: # Re-label context (in the style of TDM or HER). contexts, next_contexts = ( batch_dequeue[-2*len(contexts):-1*len(contexts)], batch_dequeue[-1*len(contexts):]) merged_states = agent.merged_states(states, contexts) merged_next_states = agent.merged_states(next_states, next_contexts) if mode == 'nometa': context_rewards, context_discounts = agent.compute_rewards( 'train', state_reprs, actions, rewards, next_state_reprs, contexts) elif mode == 'meta': # Meta-agent uses sum of rewards, not context-specific rewards. _, context_discounts = agent.compute_rewards( 'train', states, actions, rewards, next_states, contexts) context_rewards = rewards if agent.gamma_index is not None: context_discounts *= tf.cast( tf.reshape(contexts[agent.gamma_index], (-1,)), dtype=context_discounts.dtype) else: context_discounts *= my_gamma critic_loss = agent.critic_loss(merged_states, actions, context_rewards, context_discounts, merged_next_states) critic_loss = tf.reduce_mean(critic_loss) actor_loss = agent.actor_loss(merged_states, actions, context_rewards, context_discounts, merged_next_states) actor_loss *= tf.to_float( # Only update actor every N steps. tf.equal(n_updates % target_update_period, 0)) critic_train_op = slim.learning.create_train_op( critic_loss, critic_opt, global_step=n_updates, update_ops=None, summarize_gradients=summarize_gradients, clip_gradient_norm=clip_gradient_norm, variables_to_train=agent.get_trainable_critic_vars(),) critic_train_op = uvf_utils.tf_print( critic_train_op, [critic_train_op], message='critic_loss', print_freq=1000, name='critic_loss') train_op_list.append(critic_train_op) if actor_loss is not None: actor_train_op = slim.learning.create_train_op( actor_loss, actor_opt, global_step=None, update_ops=None, summarize_gradients=summarize_gradients, clip_gradient_norm=clip_gradient_norm, variables_to_train=agent.get_trainable_actor_vars(),) actor_train_op = uvf_utils.tf_print( actor_train_op, [actor_train_op], message='actor_loss', print_freq=1000, name='actor_loss') train_op_list.append(actor_train_op) assert len(train_op_list) == 4 # Update targets should happen after the networks have been updated. with tf.control_dependencies(train_op_list[2:]): update_targets_op = uvf_utils.periodically( uvf_agent.update_targets, target_update_period, 'update_targets') if meta_agent is not None: with tf.control_dependencies(train_op_list[:2]): update_meta_targets_op = uvf_utils.periodically( meta_agent.update_targets, target_update_period, 'update_targets') assert_op = tf.Assert( # Hack to get training to stop. 
tf.less_equal(global_step, 200 + num_episodes_train * max_steps_per_episode), [global_step]) with tf.control_dependencies([update_targets_op, assert_op]): train_op = tf.add_n(train_op_list[2:], name='post_update_targets') # Representation training steps on every low-level policy training step. train_op += repr_train_op with tf.control_dependencies([update_meta_targets_op, assert_op]): meta_train_op = tf.add_n(train_op_list[:2], name='post_update_meta_targets') if debug_summaries: train_.gen_debug_batch_summaries(batch) slim.summaries.add_histogram_summaries( uvf_agent.get_trainable_critic_vars(), 'critic_vars') slim.summaries.add_histogram_summaries( uvf_agent.get_trainable_actor_vars(), 'actor_vars') train_ops = train_utils.TrainOps(train_op, meta_train_op, collect_experience_op) policy_save_path = os.path.join(train_dir, policy_save_dir, 'model.ckpt') policy_vars = uvf_agent.get_actor_vars() + meta_agent.get_actor_vars() + [ global_step, num_episodes, num_resets ] + list(uvf_agent.context_vars) + list(meta_agent.context_vars) + state_preprocess.get_trainable_vars() # add critic vars, since some test evaluation depends on them policy_vars += uvf_agent.get_trainable_critic_vars() + meta_agent.get_trainable_critic_vars() policy_saver = tf.train.Saver( policy_vars, max_to_keep=max_policies_to_save, sharded=False) lowlevel_vars = (uvf_agent.get_actor_vars() + uvf_agent.get_trainable_critic_vars() + state_preprocess.get_trainable_vars()) lowlevel_saver = tf.train.Saver(lowlevel_vars) def policy_save_fn(sess): policy_saver.save( sess, policy_save_path, global_step=global_step, write_meta_graph=False) if save_policy_interval_secs > 0: tf.logging.info( 'Wait %d secs after save policy.' % save_policy_interval_secs) time.sleep(save_policy_interval_secs) train_step_fn = train_utils.TrainStep( max_number_of_steps=num_episodes_train * max_steps_per_episode + 100, num_updates_per_observation=num_updates_per_observation, num_collect_per_update=num_collect_per_update, num_collect_per_meta_update=num_collect_per_meta_update, log_every_n_steps=log_every_n_steps, policy_save_fn=policy_save_fn, save_policy_every_n_steps=save_policy_every_n_steps, should_stop_early=should_stop_early).train_step local_init_op = tf.local_variables_initializer() init_targets_op = tf.group(uvf_agent.update_targets(1.0), meta_agent.update_targets(1.0)) def initialize_training_fn(sess): """Initialize training function.""" sess.run(local_init_op) sess.run(init_targets_op) if load_path: tf.logging.info('Restoring low-level from %s' % load_path) lowlevel_saver.restore(sess, load_path) global_step_value = sess.run(global_step) assert global_step_value == 0, 'Global step should be zero.' collect_experience_call = sess.make_callable( init_collect_experience_op) for _ in range(initial_steps): collect_experience_call() train_saver = tf.train.Saver(max_to_keep=2, sharded=True) tf.logging.info('train dir: %s', train_dir) return slim.learning.train( train_ops, train_dir, train_step_fn=train_step_fn, save_interval_secs=FLAGS.save_interval_secs, saver=train_saver, log_every_n_steps=0, global_step=global_step, master="", is_chief=(FLAGS.task == 0), save_summaries_secs=FLAGS.save_summaries_secs, init_fn=initialize_training_fn)
26,894
39.081967
108
py
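In `collect_experience` above, low-level transitions are stored every step, while a meta-transition closes only when the context step counter satisfies `t % meta_action_every_n == 1` (or the episode ends). A pure-Python sketch of that schedule, assuming an illustrative `meta_action_every_n = 10`; the slot index mirrors the `(t - 2) % meta_action_every_n` scatter expression in the code:

meta_action_every_n = 10  # illustrative; the real value comes from tf_context

def meta_boundaries(t):
  # Mirrors the tf predicates used in collect_experience.
  meta_period = (t % meta_action_every_n) == 1   # a new meta-action is chosen
  meta_end = meta_period and t > 1               # the previous meta-transition closes
  return meta_period, meta_end

for t in range(1, 25):
  meta_period, meta_end = meta_boundaries(t)
  low_slot = (t - 2) % meta_action_every_n  # index written into actions_var
  if meta_end:
    print('t=%d: emit meta-transition, last low-level slot=%d' % (t, low_slot))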
models
models-master/research/efficient-hrl/run_train.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Script for training an RL agent using the UVF algorithm.

To run locally: See scripts/local_train.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import gin.tf
# pylint: disable=unused-import
import train
# pylint: enable=unused-import

flags = tf.app.flags
FLAGS = flags.FLAGS


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  if FLAGS.config_file:
    for config_file in FLAGS.config_file:
      gin.parse_config_file(config_file)
  if FLAGS.params:
    gin.parse_config(FLAGS.params)
  assert FLAGS.train_dir, "Flag 'train_dir' must be set."
  return train.train_uvf(FLAGS.train_dir)


if __name__ == '__main__':
  tf.app.run()
1,431
27.64
80
py
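run_train.py above only parses gin config files and `--params` bindings before handing off to `train.train_uvf`. A self-contained sketch of how such bindings feed the keyword arguments of a `@gin.configurable` function; the toy `make_optimizer_config` function and its parameters are made up, whereas the real bindings target configurables such as `train_uvf`:

import gin

@gin.configurable
def make_optimizer_config(learning_rate=1e-3, batch_size=64):
  # Values not bound via gin fall back to these defaults.
  return {'learning_rate': learning_rate, 'batch_size': batch_size}

# Equivalent to passing --params='make_optimizer_config.learning_rate = 3e-4'.
gin.parse_config([
    'make_optimizer_config.learning_rate = 3e-4',
])
print(make_optimizer_config())  # {'learning_rate': 0.0003, 'batch_size': 64}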
models
models-master/research/efficient-hrl/scripts/local_eval.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to run run_eval.py locally.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from subprocess import call
import sys

CONFIGS_PATH = 'configs'
CONTEXT_CONFIGS_PATH = 'context/configs'


def main():
  bb = './'
  base_num_args = 6
  if len(sys.argv) < base_num_args:
    print(
        "usage: python %s <exp_name> <context_setting_gin> <context_gin> "
        "<agent_gin> <suite> [params...]" % sys.argv[0])
    sys.exit(0)
  exp = sys.argv[1]
  context_setting = sys.argv[2]
  context = sys.argv[3]
  agent = sys.argv[4]
  assert sys.argv[5] in ["suite"], "args[5] must be `suite'"
  suite = ""
  binary = "python {bb}/run_eval{suite}.py ".format(bb=bb, suite=suite)

  h = os.environ["HOME"]
  ucp = CONFIGS_PATH
  ccp = CONTEXT_CONFIGS_PATH
  extra = ''
  command_str = ("{binary} "
                 "--logtostderr "
                 "--checkpoint_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/train "
                 "--eval_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/eval "
                 "--config_file={ucp}/{agent}.gin "
                 "--config_file={ucp}/eval_{extra}uvf.gin "
                 "--config_file={ccp}/{context_setting}.gin "
                 "--config_file={ccp}/{context}.gin ").format(
                     h=h,
                     ucp=ucp,
                     ccp=ccp,
                     context_setting=context_setting,
                     context=context,
                     agent=agent,
                     extra=extra,
                     suite=suite,
                     exp=exp,
                     binary=binary)
  for extra_arg in sys.argv[base_num_args:]:
    command_str += "--params='%s' " % extra_arg
  print(command_str)
  call(command_str, shell=True)


if __name__ == "__main__":
  main()
2,548
32.103896
92
py
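local_eval.py points `--checkpoint_dir` and `--eval_dir` at a directory tree keyed by its four positional arguments. A quick sketch of the resulting paths, using the example argument values mentioned in local_train.py below ('test001', 'hiro_orig', 'ant_maze', 'base_uvf') and a placeholder home directory:

import os

h = os.environ.get('HOME', '/home/user')  # '/home/user' is a placeholder
context_setting, context, agent, exp = 'hiro_orig', 'ant_maze', 'base_uvf', 'test001'

base = os.path.join(h, 'tmp', context_setting, context, agent, exp)
checkpoint_dir = os.path.join(base, 'train')  # written by the training run
eval_dir = os.path.join(base, 'eval')         # where eval summaries are written
print(checkpoint_dir)
print(eval_dir)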
models
models-master/research/efficient-hrl/scripts/local_train.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to run run_train.py locally.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import random
from subprocess import call
import sys

CONFIGS_PATH = './configs'
CONTEXT_CONFIGS_PATH = './context/configs'


def main():
  bb = '.'
  base_num_args = 6
  if len(sys.argv) < base_num_args:
    print(
        "usage: python %s <exp_name> <context_setting_gin> <env_context_gin> "
        "<agent_gin> <suite> [params...]" % sys.argv[0])
    sys.exit(0)
  exp = sys.argv[1]  # Name for experiment, e.g. 'test001'
  context_setting = sys.argv[2]  # Context setting, e.g. 'hiro_orig'
  context = sys.argv[3]  # Environment-specific context, e.g. 'ant_maze'
  agent = sys.argv[4]  # Agent settings, e.g. 'base_uvf'
  assert sys.argv[5] in ["suite"], "args[5] must be `suite'"
  suite = ""
  binary = "python {bb}/run_train{suite}.py ".format(bb=bb, suite=suite)

  h = os.environ["HOME"]
  ucp = CONFIGS_PATH
  ccp = CONTEXT_CONFIGS_PATH
  extra = ''
  port = random.randint(2000, 8000)
  command_str = ("{binary} "
                 "--train_dir={h}/tmp/{context_setting}/{context}/{agent}/{exp}/train "
                 "--config_file={ucp}/{agent}.gin "
                 "--config_file={ucp}/train_{extra}uvf.gin "
                 "--config_file={ccp}/{context_setting}.gin "
                 "--config_file={ccp}/{context}.gin "
                 "--summarize_gradients=False "
                 "--save_interval_secs=60 "
                 "--save_summaries_secs=1 "
                 "--master=local "
                 "--alsologtostderr ").format(h=h,
                                              ucp=ucp,
                                              context_setting=context_setting,
                                              context=context,
                                              ccp=ccp,
                                              suite=suite,
                                              agent=agent,
                                              extra=extra,
                                              exp=exp,
                                              binary=binary,
                                              port=port)
  for extra_arg in sys.argv[base_num_args:]:
    command_str += "--params='%s' " % extra_arg
  print(command_str)
  call(command_str, shell=True)


if __name__ == "__main__":
  main()
2,882
36.441558
87
py
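Both wrapper scripts expect an invocation of the form `python scripts/local_train.py <exp> <context_setting> <context> <agent> suite [params...]` and forward any trailing arguments as `--params=` gin bindings. A short sketch of that forwarding step; the argv contents and the binding value are hypothetical, and the command string is truncated for brevity:

# Hypothetical invocation:
#   python scripts/local_train.py test001 hiro_orig ant_maze base_uvf suite \
#       'train_uvf.num_episodes_train = 5000'
argv = ['scripts/local_train.py', 'test001', 'hiro_orig', 'ant_maze',
        'base_uvf', 'suite', 'train_uvf.num_episodes_train = 5000']
base_num_args = 6

command_str = 'python ./run_train.py --train_dir=... '  # truncated for brevity
for extra_arg in argv[base_num_args:]:
  # Same forwarding loop as in the script: each trailing arg becomes a gin binding.
  command_str += "--params='%s' " % extra_arg
print(command_str)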
models
models-master/research/efficient-hrl/agents/ddpg_networks.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sample actor(policy) and critic(q) networks to use with DDPG/NAF agents. The DDPG networks are defined in "Section 7: Experiment Details" of "Continuous control with deep reinforcement learning" - Lilicrap et al. https://arxiv.org/abs/1509.02971 The NAF critic network is based on "Section 4" of "Continuous deep Q-learning with model-based acceleration" - Gu et al. https://arxiv.org/pdf/1603.00748. """ import tensorflow as tf slim = tf.contrib.slim import gin.tf @gin.configurable('ddpg_critic_net') def critic_net(states, actions, for_critic_loss=False, num_reward_dims=1, states_hidden_layers=(400,), actions_hidden_layers=None, joint_hidden_layers=(300,), weight_decay=0.0001, normalizer_fn=None, activation_fn=tf.nn.relu, zero_obs=False, images=False): """Creates a critic that returns q values for the given states and actions. Args: states: (castable to tf.float32) a [batch_size, num_state_dims] tensor representing a batch of states. actions: (castable to tf.float32) a [batch_size, num_action_dims] tensor representing a batch of actions. num_reward_dims: Number of reward dimensions. states_hidden_layers: tuple of hidden layers units for states. actions_hidden_layers: tuple of hidden layers units for actions. joint_hidden_layers: tuple of hidden layers units after joining states and actions using tf.concat(). weight_decay: Weight decay for l2 weights regularizer. normalizer_fn: Normalizer function, i.e. slim.layer_norm, activation_fn: Activation function, i.e. tf.nn.relu, slim.leaky_relu, ... Returns: A tf.float32 [batch_size] tensor of q values, or a tf.float32 [batch_size, num_reward_dims] tensor of vector q values if num_reward_dims > 1. 
""" with slim.arg_scope( [slim.fully_connected], activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=slim.variance_scaling_initializer( factor=1.0/3.0, mode='FAN_IN', uniform=True)): orig_states = tf.to_float(states) #states = tf.to_float(states) states = tf.concat([tf.to_float(states), tf.to_float(actions)], -1) #TD3 if images or zero_obs: states *= tf.constant([0.0] * 2 + [1.0] * (states.shape[1] - 2)) #LALA actions = tf.to_float(actions) if states_hidden_layers: states = slim.stack(states, slim.fully_connected, states_hidden_layers, scope='states') if actions_hidden_layers: actions = slim.stack(actions, slim.fully_connected, actions_hidden_layers, scope='actions') joint = tf.concat([states, actions], 1) if joint_hidden_layers: joint = slim.stack(joint, slim.fully_connected, joint_hidden_layers, scope='joint') with slim.arg_scope([slim.fully_connected], weights_regularizer=None, weights_initializer=tf.random_uniform_initializer( minval=-0.003, maxval=0.003)): value = slim.fully_connected(joint, num_reward_dims, activation_fn=None, normalizer_fn=None, scope='q_value') if num_reward_dims == 1: value = tf.reshape(value, [-1]) if not for_critic_loss and num_reward_dims > 1: value = tf.reduce_sum( value * tf.abs(orig_states[:, -num_reward_dims:]), -1) return value @gin.configurable('ddpg_actor_net') def actor_net(states, action_spec, hidden_layers=(400, 300), normalizer_fn=None, activation_fn=tf.nn.relu, zero_obs=False, images=False): """Creates an actor that returns actions for the given states. Args: states: (castable to tf.float32) a [batch_size, num_state_dims] tensor representing a batch of states. action_spec: (BoundedTensorSpec) A tensor spec indicating the shape and range of actions. hidden_layers: tuple of hidden layers units. normalizer_fn: Normalizer function, i.e. slim.layer_norm, activation_fn: Activation function, i.e. tf.nn.relu, slim.leaky_relu, ... Returns: A tf.float32 [batch_size, num_action_dims] tensor of actions. """ with slim.arg_scope( [slim.fully_connected], activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=slim.variance_scaling_initializer( factor=1.0/3.0, mode='FAN_IN', uniform=True)): states = tf.to_float(states) orig_states = states if images or zero_obs: # Zero-out x, y position. Hacky. states *= tf.constant([0.0] * 2 + [1.0] * (states.shape[1] - 2)) if hidden_layers: states = slim.stack(states, slim.fully_connected, hidden_layers, scope='states') with slim.arg_scope([slim.fully_connected], weights_initializer=tf.random_uniform_initializer( minval=-0.003, maxval=0.003)): actions = slim.fully_connected(states, action_spec.shape.num_elements(), scope='actions', normalizer_fn=None, activation_fn=tf.nn.tanh) action_means = (action_spec.maximum + action_spec.minimum) / 2.0 action_magnitudes = (action_spec.maximum - action_spec.minimum) / 2.0 actions = action_means + action_magnitudes * actions return actions
6,429
41.582781
80
py
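`ddpg_actor_net` above squashes its output with tanh and then rescales it into the action spec's [minimum, maximum] box. A NumPy sketch of that affine rescaling, with made-up bounds standing in for the BoundedTensorSpec:

import numpy as np

# Made-up action bounds; in the real code they come from action_spec.
minimum = np.array([-1.0, -0.5])
maximum = np.array([1.0, 2.0])

def rescale_tanh_actions(tanh_out):
  # Same arithmetic as the tail of ddpg_actor_net: map [-1, 1] into [min, max].
  action_means = (maximum + minimum) / 2.0
  action_magnitudes = (maximum - minimum) / 2.0
  return action_means + action_magnitudes * tanh_out

print(rescale_tanh_actions(np.array([-1.0, 1.0])))  # -> [-1.  2.]
print(rescale_tanh_actions(np.array([0.0, 0.0])))   # -> [0.   0.75]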
models
models-master/research/efficient-hrl/agents/ddpg_agent.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A DDPG/NAF agent. Implements the Deep Deterministic Policy Gradient (DDPG) algorithm from "Continuous control with deep reinforcement learning" - Lilicrap et al. https://arxiv.org/abs/1509.02971, and the Normalized Advantage Functions (NAF) algorithm "Continuous Deep Q-Learning with Model-based Acceleration" - Gu et al. https://arxiv.org/pdf/1603.00748. """ import tensorflow as tf slim = tf.contrib.slim import gin.tf from utils import utils from agents import ddpg_networks as networks @gin.configurable class DdpgAgent(object): """An RL agent that learns using the DDPG algorithm. Example usage: def critic_net(states, actions): ... def actor_net(states, num_action_dims): ... Given a tensorflow environment tf_env, (of type learning.deepmind.rl.environments.tensorflow.python.tfpyenvironment) obs_spec = tf_env.observation_spec() action_spec = tf_env.action_spec() ddpg_agent = agent.DdpgAgent(obs_spec, action_spec, actor_net=actor_net, critic_net=critic_net) we can perform actions on the environment as follows: state = tf_env.observations()[0] action = ddpg_agent.actor_net(tf.expand_dims(state, 0))[0, :] transition_type, reward, discount = tf_env.step([action]) Train: critic_loss = ddpg_agent.critic_loss(states, actions, rewards, discounts, next_states) actor_loss = ddpg_agent.actor_loss(states) critic_train_op = slim.learning.create_train_op( critic_loss, critic_optimizer, variables_to_train=ddpg_agent.get_trainable_critic_vars(), ) actor_train_op = slim.learning.create_train_op( actor_loss, actor_optimizer, variables_to_train=ddpg_agent.get_trainable_actor_vars(), ) """ ACTOR_NET_SCOPE = 'actor_net' CRITIC_NET_SCOPE = 'critic_net' TARGET_ACTOR_NET_SCOPE = 'target_actor_net' TARGET_CRITIC_NET_SCOPE = 'target_critic_net' def __init__(self, observation_spec, action_spec, actor_net=networks.actor_net, critic_net=networks.critic_net, td_errors_loss=tf.losses.huber_loss, dqda_clipping=0., actions_regularizer=0., target_q_clipping=None, residual_phi=0.0, debug_summaries=False): """Constructs a DDPG agent. Args: observation_spec: A TensorSpec defining the observations. action_spec: A BoundedTensorSpec defining the actions. actor_net: A callable that creates the actor network. Must take the following arguments: states, num_actions. Please see networks.actor_net for an example. critic_net: A callable that creates the critic network. Must take the following arguments: states, actions. Please see networks.critic_net for an example. td_errors_loss: A callable defining the loss function for the critic td error. dqda_clipping: (float) clips the gradient dqda element-wise between [-dqda_clipping, dqda_clipping]. Does not perform clipping if dqda_clipping == 0. actions_regularizer: A scalar, when positive penalizes the norm of the actions. This can prevent saturation of actions for the actor_loss. 
target_q_clipping: (tuple of floats) clips target q values within (low, high) values when computing the critic loss. residual_phi: (float) [0.0, 1.0] Residual algorithm parameter that interpolates between Q-learning and residual gradient algorithm. http://www.leemon.com/papers/1995b.pdf debug_summaries: If True, add summaries to help debug behavior. Raises: ValueError: If 'dqda_clipping' is < 0. """ self._observation_spec = observation_spec[0] self._action_spec = action_spec[0] self._state_shape = tf.TensorShape([None]).concatenate( self._observation_spec.shape) self._action_shape = tf.TensorShape([None]).concatenate( self._action_spec.shape) self._num_action_dims = self._action_spec.shape.num_elements() self._scope = tf.get_variable_scope().name self._actor_net = tf.make_template( self.ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) self._critic_net = tf.make_template( self.CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) self._target_actor_net = tf.make_template( self.TARGET_ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) self._target_critic_net = tf.make_template( self.TARGET_CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) self._td_errors_loss = td_errors_loss if dqda_clipping < 0: raise ValueError('dqda_clipping must be >= 0.') self._dqda_clipping = dqda_clipping self._actions_regularizer = actions_regularizer self._target_q_clipping = target_q_clipping self._residual_phi = residual_phi self._debug_summaries = debug_summaries def _batch_state(self, state): """Convert state to a batched state. Args: state: Either a list/tuple with an state tensor [num_state_dims]. Returns: A tensor [1, num_state_dims] """ if isinstance(state, (tuple, list)): state = state[0] if state.get_shape().ndims == 1: state = tf.expand_dims(state, 0) return state def action(self, state): """Returns the next action for the state. Args: state: A [num_state_dims] tensor representing a state. Returns: A [num_action_dims] tensor representing the action. """ return self.actor_net(self._batch_state(state), stop_gradients=True)[0, :] @gin.configurable('ddpg_sample_action') def sample_action(self, state, stddev=1.0): """Returns the action for the state with additive noise. Args: state: A [num_state_dims] tensor representing a state. stddev: stddev for the Ornstein-Uhlenbeck noise. Returns: A [num_action_dims] action tensor. """ agent_action = self.action(state) agent_action += tf.random_normal(tf.shape(agent_action)) * stddev return utils.clip_to_spec(agent_action, self._action_spec) def actor_net(self, states, stop_gradients=False): """Returns the output of the actor network. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. stop_gradients: (boolean) if true, gradients cannot be propogated through this operation. Returns: A [batch_size, num_action_dims] tensor of actions. Raises: ValueError: If `states` does not have the expected dimensions. """ self._validate_states(states) actions = self._actor_net(states, self._action_spec) if stop_gradients: actions = tf.stop_gradient(actions) return actions def critic_net(self, states, actions, for_critic_loss=False): """Returns the output of the critic network. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. actions: A [batch_size, num_action_dims] tensor representing a batch of actions. Returns: q values: A [batch_size] tensor of q values. Raises: ValueError: If `states` or `actions' do not have the expected dimensions. 
""" self._validate_states(states) self._validate_actions(actions) return self._critic_net(states, actions, for_critic_loss=for_critic_loss) def target_actor_net(self, states): """Returns the output of the target actor network. The target network is used to compute stable targets for training. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: A [batch_size, num_action_dims] tensor of actions. Raises: ValueError: If `states` does not have the expected dimensions. """ self._validate_states(states) actions = self._target_actor_net(states, self._action_spec) return tf.stop_gradient(actions) def target_critic_net(self, states, actions, for_critic_loss=False): """Returns the output of the target critic network. The target network is used to compute stable targets for training. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. actions: A [batch_size, num_action_dims] tensor representing a batch of actions. Returns: q values: A [batch_size] tensor of q values. Raises: ValueError: If `states` or `actions' do not have the expected dimensions. """ self._validate_states(states) self._validate_actions(actions) return tf.stop_gradient( self._target_critic_net(states, actions, for_critic_loss=for_critic_loss)) def value_net(self, states, for_critic_loss=False): """Returns the output of the critic evaluated with the actor. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: q values: A [batch_size] tensor of q values. """ actions = self.actor_net(states) return self.critic_net(states, actions, for_critic_loss=for_critic_loss) def target_value_net(self, states, for_critic_loss=False): """Returns the output of the target critic evaluated with the target actor. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: q values: A [batch_size] tensor of q values. """ target_actions = self.target_actor_net(states) return self.target_critic_net(states, target_actions, for_critic_loss=for_critic_loss) def critic_loss(self, states, actions, rewards, discounts, next_states): """Computes a loss for training the critic network. The loss is the mean squared error between the Q value predictions of the critic and Q values estimated using TD-lambda. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. actions: A [batch_size, num_action_dims] tensor representing a batch of actions. rewards: A [batch_size, ...] tensor representing a batch of rewards, broadcastable to the critic net output. discounts: A [batch_size, ...] tensor representing a batch of discounts, broadcastable to the critic net output. next_states: A [batch_size, num_state_dims] tensor representing a batch of next states. Returns: A rank-0 tensor representing the critic loss. Raises: ValueError: If any of the inputs do not have the expected dimensions, or if their batch_sizes do not match. 
""" self._validate_states(states) self._validate_actions(actions) self._validate_states(next_states) target_q_values = self.target_value_net(next_states, for_critic_loss=True) td_targets = target_q_values * discounts + rewards if self._target_q_clipping is not None: td_targets = tf.clip_by_value(td_targets, self._target_q_clipping[0], self._target_q_clipping[1]) q_values = self.critic_net(states, actions, for_critic_loss=True) td_errors = td_targets - q_values if self._debug_summaries: gen_debug_td_error_summaries( target_q_values, q_values, td_targets, td_errors) loss = self._td_errors_loss(td_targets, q_values) if self._residual_phi > 0.0: # compute residual gradient loss residual_q_values = self.value_net(next_states, for_critic_loss=True) residual_td_targets = residual_q_values * discounts + rewards if self._target_q_clipping is not None: residual_td_targets = tf.clip_by_value(residual_td_targets, self._target_q_clipping[0], self._target_q_clipping[1]) residual_td_errors = residual_td_targets - q_values residual_loss = self._td_errors_loss( residual_td_targets, residual_q_values) loss = (loss * (1.0 - self._residual_phi) + residual_loss * self._residual_phi) return loss def actor_loss(self, states): """Computes a loss for training the actor network. Note that output does not represent an actual loss. It is called a loss only in the sense that its gradient w.r.t. the actor network weights is the correct gradient for training the actor network, i.e. dloss/dweights = (dq/da)*(da/dweights) which is the gradient used in Algorithm 1 of Lilicrap et al. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: A rank-0 tensor representing the actor loss. Raises: ValueError: If `states` does not have the expected dimensions. """ self._validate_states(states) actions = self.actor_net(states, stop_gradients=False) critic_values = self.critic_net(states, actions) q_values = self.critic_function(critic_values, states) dqda = tf.gradients([q_values], [actions])[0] dqda_unclipped = dqda if self._dqda_clipping > 0: dqda = tf.clip_by_value(dqda, -self._dqda_clipping, self._dqda_clipping) actions_norm = tf.norm(actions) if self._debug_summaries: with tf.name_scope('dqda'): tf.summary.scalar('actions_norm', actions_norm) tf.summary.histogram('dqda', dqda) tf.summary.histogram('dqda_unclipped', dqda_unclipped) tf.summary.histogram('actions', actions) for a in range(self._num_action_dims): tf.summary.histogram('dqda_unclipped_%d' % a, dqda_unclipped[:, a]) tf.summary.histogram('dqda_%d' % a, dqda[:, a]) actions_norm *= self._actions_regularizer return slim.losses.mean_squared_error(tf.stop_gradient(dqda + actions), actions, scope='actor_loss') + actions_norm @gin.configurable('ddpg_critic_function') def critic_function(self, critic_values, states, weights=None): """Computes q values based on critic_net outputs, states, and weights. Args: critic_values: A tf.float32 [batch_size, ...] tensor representing outputs from the critic net. states: A [batch_size, num_state_dims] tensor representing a batch of states. weights: A list or Numpy array or tensor with a shape broadcastable to `critic_values`. Returns: A tf.float32 [batch_size] tensor representing q values. 
""" del states # unused args if weights is not None: weights = tf.convert_to_tensor(weights, dtype=critic_values.dtype) critic_values *= weights if critic_values.shape.ndims > 1: critic_values = tf.reduce_sum(critic_values, range(1, critic_values.shape.ndims)) critic_values.shape.assert_has_rank(1) return critic_values @gin.configurable('ddpg_update_targets') def update_targets(self, tau=1.0): """Performs a soft update of the target network parameters. For each weight w_s in the actor/critic networks, and its corresponding weight w_t in the target actor/critic networks, a soft update is: w_t = (1- tau) x w_t + tau x ws Args: tau: A float scalar in [0, 1] Returns: An operation that performs a soft update of the target network parameters. Raises: ValueError: If `tau` is not in [0, 1]. """ if tau < 0 or tau > 1: raise ValueError('Input `tau` should be in [0, 1].') update_actor = utils.soft_variables_update( slim.get_trainable_variables( utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)), slim.get_trainable_variables( utils.join_scope(self._scope, self.TARGET_ACTOR_NET_SCOPE)), tau) update_critic = utils.soft_variables_update( slim.get_trainable_variables( utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)), slim.get_trainable_variables( utils.join_scope(self._scope, self.TARGET_CRITIC_NET_SCOPE)), tau) return tf.group(update_actor, update_critic, name='update_targets') def get_trainable_critic_vars(self): """Returns a list of trainable variables in the critic network. Returns: A list of trainable variables in the critic network. """ return slim.get_trainable_variables( utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)) def get_trainable_actor_vars(self): """Returns a list of trainable variables in the actor network. Returns: A list of trainable variables in the actor network. """ return slim.get_trainable_variables( utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)) def get_critic_vars(self): """Returns a list of all variables in the critic network. Returns: A list of trainable variables in the critic network. """ return slim.get_model_variables( utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)) def get_actor_vars(self): """Returns a list of all variables in the actor network. Returns: A list of trainable variables in the actor network. """ return slim.get_model_variables( utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)) def _validate_states(self, states): """Raises a value error if `states` does not have the expected shape. Args: states: A tensor. Raises: ValueError: If states.shape or states.dtype are not compatible with observation_spec. """ states.shape.assert_is_compatible_with(self._state_shape) if not states.dtype.is_compatible_with(self._observation_spec.dtype): raise ValueError('states.dtype={} is not compatible with' ' observation_spec.dtype={}'.format( states.dtype, self._observation_spec.dtype)) def _validate_actions(self, actions): """Raises a value error if `actions` does not have the expected shape. Args: actions: A tensor. Raises: ValueError: If actions.shape or actions.dtype are not compatible with action_spec. 
""" actions.shape.assert_is_compatible_with(self._action_shape) if not actions.dtype.is_compatible_with(self._action_spec.dtype): raise ValueError('actions.dtype={} is not compatible with' ' action_spec.dtype={}'.format( actions.dtype, self._action_spec.dtype)) @gin.configurable class TD3Agent(DdpgAgent): """An RL agent that learns using the TD3 algorithm.""" ACTOR_NET_SCOPE = 'actor_net' CRITIC_NET_SCOPE = 'critic_net' CRITIC_NET2_SCOPE = 'critic_net2' TARGET_ACTOR_NET_SCOPE = 'target_actor_net' TARGET_CRITIC_NET_SCOPE = 'target_critic_net' TARGET_CRITIC_NET2_SCOPE = 'target_critic_net2' def __init__(self, observation_spec, action_spec, actor_net=networks.actor_net, critic_net=networks.critic_net, td_errors_loss=tf.losses.huber_loss, dqda_clipping=0., actions_regularizer=0., target_q_clipping=None, residual_phi=0.0, debug_summaries=False): """Constructs a TD3 agent. Args: observation_spec: A TensorSpec defining the observations. action_spec: A BoundedTensorSpec defining the actions. actor_net: A callable that creates the actor network. Must take the following arguments: states, num_actions. Please see networks.actor_net for an example. critic_net: A callable that creates the critic network. Must take the following arguments: states, actions. Please see networks.critic_net for an example. td_errors_loss: A callable defining the loss function for the critic td error. dqda_clipping: (float) clips the gradient dqda element-wise between [-dqda_clipping, dqda_clipping]. Does not perform clipping if dqda_clipping == 0. actions_regularizer: A scalar, when positive penalizes the norm of the actions. This can prevent saturation of actions for the actor_loss. target_q_clipping: (tuple of floats) clips target q values within (low, high) values when computing the critic loss. residual_phi: (float) [0.0, 1.0] Residual algorithm parameter that interpolates between Q-learning and residual gradient algorithm. http://www.leemon.com/papers/1995b.pdf debug_summaries: If True, add summaries to help debug behavior. Raises: ValueError: If 'dqda_clipping' is < 0. """ self._observation_spec = observation_spec[0] self._action_spec = action_spec[0] self._state_shape = tf.TensorShape([None]).concatenate( self._observation_spec.shape) self._action_shape = tf.TensorShape([None]).concatenate( self._action_spec.shape) self._num_action_dims = self._action_spec.shape.num_elements() self._scope = tf.get_variable_scope().name self._actor_net = tf.make_template( self.ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) self._critic_net = tf.make_template( self.CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) self._critic_net2 = tf.make_template( self.CRITIC_NET2_SCOPE, critic_net, create_scope_now_=True) self._target_actor_net = tf.make_template( self.TARGET_ACTOR_NET_SCOPE, actor_net, create_scope_now_=True) self._target_critic_net = tf.make_template( self.TARGET_CRITIC_NET_SCOPE, critic_net, create_scope_now_=True) self._target_critic_net2 = tf.make_template( self.TARGET_CRITIC_NET2_SCOPE, critic_net, create_scope_now_=True) self._td_errors_loss = td_errors_loss if dqda_clipping < 0: raise ValueError('dqda_clipping must be >= 0.') self._dqda_clipping = dqda_clipping self._actions_regularizer = actions_regularizer self._target_q_clipping = target_q_clipping self._residual_phi = residual_phi self._debug_summaries = debug_summaries def get_trainable_critic_vars(self): """Returns a list of trainable variables in the critic network. NOTE: This gets the vars of both critic networks. 
Returns: A list of trainable variables in the critic network. """ return ( slim.get_trainable_variables( utils.join_scope(self._scope, self.CRITIC_NET_SCOPE))) def critic_net(self, states, actions, for_critic_loss=False): """Returns the output of the critic network. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. actions: A [batch_size, num_action_dims] tensor representing a batch of actions. Returns: q values: A [batch_size] tensor of q values. Raises: ValueError: If `states` or `actions' do not have the expected dimensions. """ values1 = self._critic_net(states, actions, for_critic_loss=for_critic_loss) values2 = self._critic_net2(states, actions, for_critic_loss=for_critic_loss) if for_critic_loss: return values1, values2 return values1 def target_critic_net(self, states, actions, for_critic_loss=False): """Returns the output of the target critic network. The target network is used to compute stable targets for training. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. actions: A [batch_size, num_action_dims] tensor representing a batch of actions. Returns: q values: A [batch_size] tensor of q values. Raises: ValueError: If `states` or `actions' do not have the expected dimensions. """ self._validate_states(states) self._validate_actions(actions) values1 = tf.stop_gradient( self._target_critic_net(states, actions, for_critic_loss=for_critic_loss)) values2 = tf.stop_gradient( self._target_critic_net2(states, actions, for_critic_loss=for_critic_loss)) if for_critic_loss: return values1, values2 return values1 def value_net(self, states, for_critic_loss=False): """Returns the output of the critic evaluated with the actor. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: q values: A [batch_size] tensor of q values. """ actions = self.actor_net(states) return self.critic_net(states, actions, for_critic_loss=for_critic_loss) def target_value_net(self, states, for_critic_loss=False): """Returns the output of the target critic evaluated with the target actor. Args: states: A [batch_size, num_state_dims] tensor representing a batch of states. Returns: q values: A [batch_size] tensor of q values. """ target_actions = self.target_actor_net(states) noise = tf.clip_by_value( tf.random_normal(tf.shape(target_actions), stddev=0.2), -0.5, 0.5) values1, values2 = self.target_critic_net( states, target_actions + noise, for_critic_loss=for_critic_loss) values = tf.minimum(values1, values2) return values, values @gin.configurable('td3_update_targets') def update_targets(self, tau=1.0): """Performs a soft update of the target network parameters. For each weight w_s in the actor/critic networks, and its corresponding weight w_t in the target actor/critic networks, a soft update is: w_t = (1- tau) x w_t + tau x ws Args: tau: A float scalar in [0, 1] Returns: An operation that performs a soft update of the target network parameters. Raises: ValueError: If `tau` is not in [0, 1]. """ if tau < 0 or tau > 1: raise ValueError('Input `tau` should be in [0, 1].') update_actor = utils.soft_variables_update( slim.get_trainable_variables( utils.join_scope(self._scope, self.ACTOR_NET_SCOPE)), slim.get_trainable_variables( utils.join_scope(self._scope, self.TARGET_ACTOR_NET_SCOPE)), tau) # NOTE: This updates both critic networks. 
update_critic = utils.soft_variables_update( slim.get_trainable_variables( utils.join_scope(self._scope, self.CRITIC_NET_SCOPE)), slim.get_trainable_variables( utils.join_scope(self._scope, self.TARGET_CRITIC_NET_SCOPE)), tau) return tf.group(update_actor, update_critic, name='update_targets') def gen_debug_td_error_summaries( target_q_values, q_values, td_targets, td_errors): """Generates debug summaries for critic given a set of batch samples. Args: target_q_values: set of predicted next stage values. q_values: current predicted value for the critic network. td_targets: discounted target_q_values with added next stage reward. td_errors: the different between td_targets and q_values. """ with tf.name_scope('td_errors'): tf.summary.histogram('td_targets', td_targets) tf.summary.histogram('q_values', q_values) tf.summary.histogram('target_q_values', target_q_values) tf.summary.histogram('td_errors', td_errors) with tf.name_scope('td_targets'): tf.summary.scalar('mean', tf.reduce_mean(td_targets)) tf.summary.scalar('max', tf.reduce_max(td_targets)) tf.summary.scalar('min', tf.reduce_min(td_targets)) with tf.name_scope('q_values'): tf.summary.scalar('mean', tf.reduce_mean(q_values)) tf.summary.scalar('max', tf.reduce_max(q_values)) tf.summary.scalar('min', tf.reduce_min(q_values)) with tf.name_scope('target_q_values'): tf.summary.scalar('mean', tf.reduce_mean(target_q_values)) tf.summary.scalar('max', tf.reduce_max(target_q_values)) tf.summary.scalar('min', tf.reduce_min(target_q_values)) with tf.name_scope('td_errors'): tf.summary.scalar('mean', tf.reduce_mean(td_errors)) tf.summary.scalar('max', tf.reduce_max(td_errors)) tf.summary.scalar('min', tf.reduce_min(td_errors)) tf.summary.scalar('mean_abs', tf.reduce_mean(tf.abs(td_errors)))
29,000
38.190541
80
py
models
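For reference, a minimal sketch of how the losses and soft target updates defined in ddpg_agent.py above are typically wired into training ops. The observation/action shapes, the learning rates, and the tf_agents TensorSpec construction are illustrative assumptions, not values taken from this repository.

import tensorflow as tf
from tf_agents import specs
from agents import ddpg_agent

slim = tf.contrib.slim

# Hypothetical specs: a 12-dim observation and a 2-dim bounded action.
obs_spec = [specs.TensorSpec([12], tf.float32, 'observation')]
action_spec = [specs.BoundedTensorSpec([2], tf.float32, -1.0, 1.0, 'action')]
agent = ddpg_agent.DdpgAgent(obs_spec, action_spec)

states = tf.placeholder(tf.float32, [None, 12], 'states')
actions = tf.placeholder(tf.float32, [None, 2], 'actions')
rewards = tf.placeholder(tf.float32, [None], 'rewards')
discounts = tf.placeholder(tf.float32, [None], 'discounts')
next_states = tf.placeholder(tf.float32, [None, 12], 'next_states')

critic_loss = agent.critic_loss(states, actions, rewards, discounts,
                                next_states)
actor_loss = agent.actor_loss(states)
critic_train_op = slim.learning.create_train_op(
    critic_loss, tf.train.AdamOptimizer(1e-3),
    variables_to_train=agent.get_trainable_critic_vars())
actor_train_op = slim.learning.create_train_op(
    actor_loss, tf.train.AdamOptimizer(1e-4),
    variables_to_train=agent.get_trainable_actor_vars())

# Soft target update, w_target <- (1 - tau) * w_target + tau * w_online,
# typically run after each (or every few) gradient steps.
update_targets_op = agent.update_targets(tau=0.005)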
models-master/research/efficient-hrl/agents/__init__.py
1
0
0
py
models
models-master/research/efficient-hrl/agents/circular_buffer.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A circular buffer where each element is a list of tensors. Each element of the buffer is a list of tensors. An example use case is a replay buffer in reinforcement learning, where each element is a list of tensors representing the state, action, reward etc. New elements are added sequentially, and once the buffer is full, we start overwriting them in a circular fashion. Reading does not remove any elements, only adding new elements does. """ import collections import numpy as np import tensorflow as tf import gin.tf @gin.configurable class CircularBuffer(object): """A circular buffer where each element is a list of tensors.""" def __init__(self, buffer_size=1000, scope='replay_buffer'): """Circular buffer of list of tensors. Args: buffer_size: (integer) maximum number of tensor lists the buffer can hold. scope: (string) variable scope for creating the variables. """ self._buffer_size = np.int64(buffer_size) self._scope = scope self._tensors = collections.OrderedDict() with tf.variable_scope(self._scope): self._num_adds = tf.Variable(0, dtype=tf.int64, name='num_adds') self._num_adds_cs = tf.CriticalSection(name='num_adds') @property def buffer_size(self): return self._buffer_size @property def scope(self): return self._scope @property def num_adds(self): return self._num_adds def _create_variables(self, tensors): with tf.variable_scope(self._scope): for name in tensors.keys(): tensor = tensors[name] self._tensors[name] = tf.get_variable( name='BufferVariable_' + name, shape=[self._buffer_size] + tensor.get_shape().as_list(), dtype=tensor.dtype, trainable=False) def _validate(self, tensors): """Validate shapes of tensors.""" if len(tensors) != len(self._tensors): raise ValueError('Expected tensors to have %d elements. Received %d ' 'instead.' % (len(self._tensors), len(tensors))) if self._tensors.keys() != tensors.keys(): raise ValueError('The keys of tensors should be the always the same.' 'Received %s instead %s.' % (tensors.keys(), self._tensors.keys())) for name, tensor in tensors.items(): if tensor.get_shape().as_list() != self._tensors[ name].get_shape().as_list()[1:]: raise ValueError('Tensor %s has incorrect shape.' % name) if not tensor.dtype.is_compatible_with(self._tensors[name].dtype): raise ValueError( 'Tensor %s has incorrect data type. Expected %s, received %s' % (name, self._tensors[name].read_value().dtype, tensor.dtype)) def add(self, tensors): """Adds an element (list/tuple/dict of tensors) to the buffer. Args: tensors: (list/tuple/dict of tensors) to be added to the buffer. Returns: An add operation that adds the input `tensors` to the buffer. Similar to an enqueue_op. Raises: ValueError: If the shapes and data types of input `tensors' are not the same across calls to the add function. 
""" return self.maybe_add(tensors, True) def maybe_add(self, tensors, condition): """Adds an element (tensors) to the buffer based on the condition.. Args: tensors: (list/tuple of tensors) to be added to the buffer. condition: A boolean Tensor controlling whether the tensors would be added to the buffer or not. Returns: An add operation that adds the input `tensors` to the buffer. Similar to an maybe_enqueue_op. Raises: ValueError: If the shapes and data types of input `tensors' are not the same across calls to the add function. """ if not isinstance(tensors, dict): names = [str(i) for i in range(len(tensors))] tensors = collections.OrderedDict(zip(names, tensors)) if not isinstance(tensors, collections.OrderedDict): tensors = collections.OrderedDict( sorted(tensors.items(), key=lambda t: t[0])) if not self._tensors: self._create_variables(tensors) else: self._validate(tensors) #@tf.critical_section(self._position_mutex) def _increment_num_adds(): # Adding 0 to the num_adds variable is a trick to read the value of the # variable and return a read-only tensor. Doing this in a critical # section allows us to capture a snapshot of the variable that will # not be affected by other threads updating num_adds. return self._num_adds.assign_add(1) + 0 def _add(): num_adds_inc = self._num_adds_cs.execute(_increment_num_adds) current_pos = tf.mod(num_adds_inc - 1, self._buffer_size) update_ops = [] for name in self._tensors.keys(): update_ops.append( tf.scatter_update(self._tensors[name], current_pos, tensors[name])) return tf.group(*update_ops) return tf.contrib.framework.smart_cond(condition, _add, tf.no_op) def get_random_batch(self, batch_size, keys=None, num_steps=1): """Samples a batch of tensors from the buffer with replacement. Args: batch_size: (integer) number of elements to sample. keys: List of keys of tensors to retrieve. If None retrieve all. num_steps: (integer) length of trajectories to return. If > 1 will return a list of lists, where each internal list represents a trajectory of length num_steps. Returns: A list of tensors, where each element in the list is a batch sampled from one of the tensors in the buffer. Raises: ValueError: If get_random_batch is called before calling the add function. tf.errors.InvalidArgumentError: If this operation is executed before any items are added to the buffer. """ if not self._tensors: raise ValueError('The add function must be called before get_random_batch.') if keys is None: keys = self._tensors.keys() latest_start_index = self.get_num_adds() - num_steps + 1 empty_buffer_assert = tf.Assert( tf.greater(latest_start_index, 0), ['Not enough elements have been added to the buffer.']) with tf.control_dependencies([empty_buffer_assert]): max_index = tf.minimum(self._buffer_size, latest_start_index) indices = tf.random_uniform( [batch_size], minval=0, maxval=max_index, dtype=tf.int64) if num_steps == 1: return self.gather(indices, keys) else: return self.gather_nstep(num_steps, indices, keys) def gather(self, indices, keys=None): """Returns elements at the specified indices from the buffer. Args: indices: (list of integers or rank 1 int Tensor) indices in the buffer to retrieve elements from. keys: List of keys of tensors to retrieve. If None retrieve all. Returns: A list of tensors, where each element in the list is obtained by indexing one of the tensors in the buffer. Raises: ValueError: If gather is called before calling the add function. tf.errors.InvalidArgumentError: If indices are bigger than the number of items in the buffer. 
""" if not self._tensors: raise ValueError('The add function must be called before calling gather.') if keys is None: keys = self._tensors.keys() with tf.name_scope('Gather'): index_bound_assert = tf.Assert( tf.less( tf.to_int64(tf.reduce_max(indices)), tf.minimum(self.get_num_adds(), self._buffer_size)), ['Index out of bounds.']) with tf.control_dependencies([index_bound_assert]): indices = tf.convert_to_tensor(indices) batch = [] for key in keys: batch.append(tf.gather(self._tensors[key], indices, name=key)) return batch def gather_nstep(self, num_steps, indices, keys=None): """Returns elements at the specified indices from the buffer. Args: num_steps: (integer) length of trajectories to return. indices: (list of rank num_steps int Tensor) indices in the buffer to retrieve elements from for multiple trajectories. Each Tensor in the list represents the indices for a trajectory. keys: List of keys of tensors to retrieve. If None retrieve all. Returns: A list of list-of-tensors, where each element in the list is obtained by indexing one of the tensors in the buffer. Raises: ValueError: If gather is called before calling the add function. tf.errors.InvalidArgumentError: If indices are bigger than the number of items in the buffer. """ if not self._tensors: raise ValueError('The add function must be called before calling gather.') if keys is None: keys = self._tensors.keys() with tf.name_scope('Gather'): index_bound_assert = tf.Assert( tf.less_equal( tf.to_int64(tf.reduce_max(indices) + num_steps), self.get_num_adds()), ['Trajectory indices go out of bounds.']) with tf.control_dependencies([index_bound_assert]): indices = tf.map_fn( lambda x: tf.mod(tf.range(x, x + num_steps), self._buffer_size), indices, dtype=tf.int64) batch = [] for key in keys: def SampleTrajectories(trajectory_indices, key=key, num_steps=num_steps): trajectory_indices.set_shape([num_steps]) return tf.gather(self._tensors[key], trajectory_indices, name=key) batch.append(tf.map_fn(SampleTrajectories, indices, dtype=self._tensors[key].dtype)) return batch def get_position(self): """Returns the position at which the last element was added. Returns: An int tensor representing the index at which the last element was added to the buffer or -1 if no elements were added. """ return tf.cond(self.get_num_adds() < 1, lambda: self.get_num_adds() - 1, lambda: tf.mod(self.get_num_adds() - 1, self._buffer_size)) def get_num_adds(self): """Returns the number of additions to the buffer. Returns: An int tensor representing the number of elements that were added. """ def num_adds(): return self._num_adds.value() return self._num_adds_cs.execute(num_adds) def get_num_tensors(self): """Returns the number of tensors (slots) in the buffer.""" return len(self._tensors)
11,239
37.758621
82
py
models
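A minimal usage sketch for the circular buffer above when it serves as a replay buffer; the tensor shapes and batch size are illustrative assumptions, not values from this repository.

import tensorflow as tf
from agents import circular_buffer

replay = circular_buffer.CircularBuffer(buffer_size=10000)

state = tf.placeholder(tf.float32, [12], 'state')
action = tf.placeholder(tf.float32, [2], 'action')
reward = tf.placeholder(tf.float32, [], 'reward')

# The first add() call creates the underlying buffer variables; every later
# call must pass tensors with the same shapes and dtypes.
add_op = replay.add([state, action, reward])

# Samples uniformly with replacement; returns one batched tensor per stored
# tensor, in the order the tensors were added.
batch_state, batch_action, batch_reward = replay.get_random_batch(64)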
models-master/research/efficient-hrl/context/context.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Context for Universal Value Function agents. A context specifies a list of contextual variables, each with own sampling and reward computation methods. Examples of contextual variables include goal states, reward combination vectors, etc. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tf_agents import specs import gin.tf from utils import utils as uvf_utils @gin.configurable class Context(object): """Base context.""" VAR_NAME = 'action' def __init__(self, tf_env, context_ranges=None, context_shapes=None, state_indices=None, variable_indices=None, gamma_index=None, settable_context=False, timers=None, samplers=None, reward_weights=None, reward_fn=None, random_sampler_mode='random', normalizers=None, context_transition_fn=None, context_multi_transition_fn=None, meta_action_every_n=None): self._tf_env = tf_env self.variable_indices = variable_indices self.gamma_index = gamma_index self._settable_context = settable_context self.timers = timers self._context_transition_fn = context_transition_fn self._context_multi_transition_fn = context_multi_transition_fn self._random_sampler_mode = random_sampler_mode # assign specs self._obs_spec = self._tf_env.observation_spec() self._context_shapes = tuple([ shape if shape is not None else self._obs_spec.shape for shape in context_shapes ]) self.context_specs = tuple([ specs.TensorSpec(dtype=self._obs_spec.dtype, shape=shape) for shape in self._context_shapes ]) if context_ranges is not None: self.context_ranges = context_ranges else: self.context_ranges = [None] * len(self._context_shapes) self.context_as_action_specs = tuple([ specs.BoundedTensorSpec( shape=shape, dtype=(tf.float32 if self._obs_spec.dtype in [tf.float32, tf.float64] else self._obs_spec.dtype), minimum=context_range[0], maximum=context_range[-1]) for shape, context_range in zip(self._context_shapes, self.context_ranges) ]) if state_indices is not None: self.state_indices = state_indices else: self.state_indices = [None] * len(self._context_shapes) if self.variable_indices is not None and self.n != len( self.variable_indices): raise ValueError( 'variable_indices (%s) must have the same length as contexts (%s).' 
% (self.variable_indices, self.context_specs)) assert self.n == len(self.context_ranges) assert self.n == len(self.state_indices) # assign reward/sampler fns self._sampler_fns = dict() self._samplers = dict() self._reward_fns = dict() # assign reward fns self._add_custom_reward_fns() reward_weights = reward_weights or None self._reward_fn = self._make_reward_fn(reward_fn, reward_weights) # assign samplers self._add_custom_sampler_fns() for mode, sampler_fns in samplers.items(): self._make_sampler_fn(sampler_fns, mode) # create normalizers if normalizers is None: self._normalizers = [None] * len(self.context_specs) else: self._normalizers = [ normalizer(tf.zeros(shape=spec.shape, dtype=spec.dtype)) if normalizer is not None else None for normalizer, spec in zip(normalizers, self.context_specs) ] assert self.n == len(self._normalizers) self.meta_action_every_n = meta_action_every_n # create vars self.context_vars = {} self.timer_vars = {} self.create_vars(self.VAR_NAME) self.t = tf.Variable( tf.zeros(shape=(), dtype=tf.int32), name='num_timer_steps') def _add_custom_reward_fns(self): pass def _add_custom_sampler_fns(self): pass def sample_random_contexts(self, batch_size): """Sample random batch contexts.""" assert self._random_sampler_mode is not None return self.sample_contexts(self._random_sampler_mode, batch_size)[0] def sample_contexts(self, mode, batch_size, state=None, next_state=None, **kwargs): """Sample a batch of contexts. Args: mode: A string representing the mode [`train`, `explore`, `eval`]. batch_size: Batch size. Returns: Two lists of [batch_size, num_context_dims] contexts. """ contexts, next_contexts = self._sampler_fns[mode]( batch_size, state=state, next_state=next_state, **kwargs) self._validate_contexts(contexts) self._validate_contexts(next_contexts) return contexts, next_contexts def compute_rewards(self, mode, states, actions, rewards, next_states, contexts): """Compute context-based rewards. Args: mode: A string representing the mode ['uvf', 'task']. states: A [batch_size, num_state_dims] tensor. actions: A [batch_size, num_action_dims] tensor. rewards: A [batch_size] tensor representing unmodified rewards. next_states: A [batch_size, num_state_dims] tensor. contexts: A list of [batch_size, num_context_dims] tensors. Returns: A [batch_size] tensor representing rewards. """ return self._reward_fn(states, actions, rewards, next_states, contexts) def _make_reward_fn(self, reward_fns_list, reward_weights): """Returns a fn that computes rewards. Args: reward_fns_list: A fn or a list of reward fns. mode: A string representing the operating mode. reward_weights: A list of reward weights. 
""" if not isinstance(reward_fns_list, (list, tuple)): reward_fns_list = [reward_fns_list] if reward_weights is None: reward_weights = [1.0] * len(reward_fns_list) assert len(reward_fns_list) == len(reward_weights) reward_fns_list = [ self._custom_reward_fns[fn] if isinstance(fn, (str,)) else fn for fn in reward_fns_list ] def reward_fn(*args, **kwargs): """Returns rewards, discounts.""" reward_tuples = [ reward_fn(*args, **kwargs) for reward_fn in reward_fns_list ] rewards_list = [reward_tuple[0] for reward_tuple in reward_tuples] discounts_list = [reward_tuple[1] for reward_tuple in reward_tuples] ndims = max([r.shape.ndims for r in rewards_list]) if ndims > 1: # expand reward shapes to allow broadcasting for i in range(len(rewards_list)): for _ in range(rewards_list[i].shape.ndims - ndims): rewards_list[i] = tf.expand_dims(rewards_list[i], axis=-1) for _ in range(discounts_list[i].shape.ndims - ndims): discounts_list[i] = tf.expand_dims(discounts_list[i], axis=-1) rewards = tf.add_n( [r * tf.to_float(w) for r, w in zip(rewards_list, reward_weights)]) discounts = discounts_list[0] for d in discounts_list[1:]: discounts *= d return rewards, discounts return reward_fn def _make_sampler_fn(self, sampler_cls_list, mode): """Returns a fn that samples a list of context vars. Args: sampler_cls_list: A list of sampler classes. mode: A string representing the operating mode. """ if not isinstance(sampler_cls_list, (list, tuple)): sampler_cls_list = [sampler_cls_list] self._samplers[mode] = [] sampler_fns = [] for spec, sampler in zip(self.context_specs, sampler_cls_list): if isinstance(sampler, (str,)): sampler_fn = self._custom_sampler_fns[sampler] else: sampler_fn = sampler(context_spec=spec) self._samplers[mode].append(sampler_fn) sampler_fns.append(sampler_fn) def batch_sampler_fn(batch_size, state=None, next_state=None, **kwargs): """Sampler fn.""" contexts_tuples = [ sampler(batch_size, state=state, next_state=next_state, **kwargs) for sampler in sampler_fns] contexts = [c[0] for c in contexts_tuples] next_contexts = [c[1] for c in contexts_tuples] contexts = [ normalizer.update_apply(c) if normalizer is not None else c for normalizer, c in zip(self._normalizers, contexts) ] next_contexts = [ normalizer.apply(c) if normalizer is not None else c for normalizer, c in zip(self._normalizers, next_contexts) ] return contexts, next_contexts self._sampler_fns[mode] = batch_sampler_fn def set_env_context_op(self, context, disable_unnormalizer=False): """Returns a TensorFlow op that sets the environment context. Args: context: A list of context Tensor variables. disable_unnormalizer: Disable unnormalization. Returns: A TensorFlow op that sets the environment context. """ ret_val = np.array(1.0, dtype=np.float32) if not self._settable_context: return tf.identity(ret_val) if not disable_unnormalizer: context = [ normalizer.unapply(tf.expand_dims(c, 0))[0] if normalizer is not None else c for normalizer, c in zip(self._normalizers, context) ] def set_context_func(*env_context_values): tf.logging.info('[set_env_context_op] Setting gym environment context.') # pylint: disable=protected-access self.gym_env.set_context(*env_context_values) return ret_val # pylint: enable=protected-access with tf.name_scope('set_env_context'): set_op = tf.py_func(set_context_func, context, tf.float32, name='set_env_context_py_func') set_op.set_shape([]) return set_op def set_replay(self, replay): """Set replay buffer for samplers. Args: replay: A replay buffer. 
""" for _, samplers in self._samplers.items(): for sampler in samplers: sampler.set_replay(replay) def get_clip_fns(self): """Returns a list of clip fns for contexts. Returns: A list of fns that clip context tensors. """ clip_fns = [] for context_range in self.context_ranges: def clip_fn(var_, range_=context_range): """Clip a tensor.""" if range_ is None: clipped_var = tf.identity(var_) elif isinstance(range_[0], (int, long, float, list, np.ndarray)): clipped_var = tf.clip_by_value( var_, range_[0], range_[1],) else: raise NotImplementedError(range_) return clipped_var clip_fns.append(clip_fn) return clip_fns def _validate_contexts(self, contexts): """Validate if contexts have right specs. Args: contexts: A list of [batch_size, num_context_dim] tensors. Raises: ValueError: If shape or dtype mismatches that of spec. """ for i, (context, spec) in enumerate(zip(contexts, self.context_specs)): if context[0].shape != spec.shape: raise ValueError('contexts[%d] has invalid shape %s wrt spec shape %s' % (i, context[0].shape, spec.shape)) if context.dtype != spec.dtype: raise ValueError('contexts[%d] has invalid dtype %s wrt spec dtype %s' % (i, context.dtype, spec.dtype)) def context_multi_transition_fn(self, contexts, **kwargs): """Returns multiple future contexts starting from a batch.""" assert self._context_multi_transition_fn return self._context_multi_transition_fn(contexts, None, None, **kwargs) def step(self, mode, agent=None, action_fn=None, **kwargs): """Returns [next_contexts..., next_timer] list of ops. Args: mode: a string representing the mode=[train, explore, eval]. **kwargs: kwargs for context_transition_fn. Returns: a list of ops that set the context. """ if agent is None: ops = [] if self._context_transition_fn is not None: def sampler_fn(): samples = self.sample_contexts(mode, 1)[0] return [s[0] for s in samples] values = self._context_transition_fn(self.vars, self.t, sampler_fn, **kwargs) ops += [tf.assign(var, value) for var, value in zip(self.vars, values)] ops.append(tf.assign_add(self.t, 1)) # increment timer return ops else: ops = agent.tf_context.step(mode, **kwargs) state = kwargs['state'] next_state = kwargs['next_state'] state_repr = kwargs['state_repr'] next_state_repr = kwargs['next_state_repr'] with tf.control_dependencies(ops): # Step high level context before computing low level one. # Get the context transition function output. values = self._context_transition_fn(self.vars, self.t, None, state=state_repr, next_state=next_state_repr) # Select a new goal every C steps, otherwise use context transition. low_level_context = [ tf.cond(tf.equal(self.t % self.meta_action_every_n, 0), lambda: tf.cast(action_fn(next_state, context=None), tf.float32), lambda: values)] ops = [tf.assign(var, value) for var, value in zip(self.vars, low_level_context)] with tf.control_dependencies(ops): return [tf.assign_add(self.t, 1)] # increment timer return ops def reset(self, mode, agent=None, action_fn=None, state=None): """Returns ops that reset the context. Args: mode: a string representing the mode=[train, explore, eval]. Returns: a list of ops that reset the context. 
""" if agent is None: values = self.sample_contexts(mode=mode, batch_size=1)[0] if values is None: return [] values = [value[0] for value in values] values[0] = uvf_utils.tf_print( values[0], values, message='context:reset, mode=%s' % mode, first_n=10, name='context:reset:%s' % mode) all_ops = [] for _, context_vars in sorted(self.context_vars.items()): ops = [tf.assign(var, value) for var, value in zip(context_vars, values)] all_ops += ops all_ops.append(self.set_env_context_op(values)) all_ops.append(tf.assign(self.t, 0)) # reset timer return all_ops else: ops = agent.tf_context.reset(mode) # NOTE: The code is currently written in such a way that the higher level # policy does not provide a low-level context until the second # observation. Insead, we just zero-out low-level contexts. for key, context_vars in sorted(self.context_vars.items()): ops += [tf.assign(var, tf.zeros_like(var)) for var, meta_var in zip(context_vars, agent.tf_context.context_vars[key])] ops.append(tf.assign(self.t, 0)) # reset timer return ops def create_vars(self, name, agent=None): """Create tf variables for contexts. Args: name: Name of the variables. Returns: A list of [num_context_dims] tensors. """ if agent is not None: meta_vars = agent.create_vars(name) else: meta_vars = {} assert name not in self.context_vars, ('Conflict! %s is already ' 'initialized.') % name self.context_vars[name] = tuple([ tf.Variable( tf.zeros(shape=spec.shape, dtype=spec.dtype), name='%s_context_%d' % (name, i)) for i, spec in enumerate(self.context_specs) ]) return self.context_vars[name], meta_vars @property def n(self): return len(self.context_specs) @property def vars(self): return self.context_vars[self.VAR_NAME] # pylint: disable=protected-access @property def gym_env(self): return self._tf_env.pyenv._gym_env @property def tf_env(self): return self._tf_env # pylint: enable=protected-access
16,785
34.867521
99
py
models
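The reward composition performed inside Context._make_reward_fn above can be summarized by the standalone sketch below (simplified: the reward-shape broadcasting step is omitted). Per-context rewards are weighted and summed, while the discounts are multiplied together.

import tensorflow as tf

def combine_rewards(reward_tuples, reward_weights):
  """Combines (rewards, discounts) pairs the way Context._make_reward_fn does."""
  rewards_list = [r for r, _ in reward_tuples]
  discounts_list = [d for _, d in reward_tuples]
  rewards = tf.add_n(
      [r * tf.to_float(w) for r, w in zip(rewards_list, reward_weights)])
  discounts = discounts_list[0]
  for d in discounts_list[1:]:
    discounts *= d
  return rewards, discounts

# With weights [1.0, 0.1], rewards 2.0 and 5.0, and discounts 0.99 and 1.0,
# the combined reward is 1.0 * 2.0 + 0.1 * 5.0 = 2.5 and the combined
# discount is 0.99.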
models-master/research/efficient-hrl/context/context_transition_functions.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Context functions. Given the current contexts, timer and context sampler, returns new contexts after an environment step. This can be used to define a high-level policy that controls contexts as its actions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import gin.tf import utils as uvf_utils @gin.configurable def periodic_context_fn(contexts, timer, sampler_fn, period=1): """Periodically samples contexts. Args: contexts: a list of [num_context_dims] tensor variables representing current contexts. timer: a scalar integer tensor variable holding the current time step. sampler_fn: a sampler function that samples a list of [num_context_dims] tensors. period: (integer) period of update. Returns: a list of [num_context_dims] tensors. """ contexts = list(contexts[:]) # create copy return tf.cond(tf.mod(timer, period) == 0, sampler_fn, lambda: contexts) @gin.configurable def timer_context_fn(contexts, timer, sampler_fn, period=1, timer_index=-1, debug=False): """Samples contexts based on timer in contexts. Args: contexts: a list of [num_context_dims] tensor variables representing current contexts. timer: a scalar integer tensor variable holding the current time step. sampler_fn: a sampler function that samples a list of [num_context_dims] tensors. period: (integer) period of update; actual period = `period` + 1. timer_index: (integer) Index of context list that present timer. debug: (boolean) Print debug messages. Returns: a list of [num_context_dims] tensors. """ contexts = list(contexts[:]) # create copy cond = tf.equal(contexts[timer_index][0], 0) def reset(): """Sample context and reset the timer.""" new_contexts = sampler_fn() new_contexts[timer_index] = tf.zeros_like( contexts[timer_index]) + period return new_contexts def update(): """Decrement the timer.""" contexts[timer_index] -= 1 return contexts values = tf.cond(cond, reset, update) if debug: values[0] = uvf_utils.tf_print( values[0], values + [timer], 'timer_context_fn', first_n=200, name='timer_context_fn:contexts') return values @gin.configurable def relative_context_transition_fn( contexts, timer, sampler_fn, k=2, state=None, next_state=None, **kwargs): """Contexts updated to be relative to next state. """ contexts = list(contexts[:]) # create copy assert len(contexts) == 1 new_contexts = [ tf.concat( [contexts[0][:k] + state[:k] - next_state[:k], contexts[0][k:]], -1)] return new_contexts @gin.configurable def relative_context_multi_transition_fn( contexts, timer, sampler_fn, k=2, states=None, **kwargs): """Given contexts at first state and sequence of states, derives sequence of all contexts. 
""" contexts = list(contexts[:]) # create copy assert len(contexts) == 1 contexts = [ tf.concat( [tf.expand_dims(contexts[0][:, :k] + states[:, 0, :k], 1) - states[:, :, :k], contexts[0][:, None, k:] * tf.ones_like(states[:, :, :1])], -1)] return contexts
4,053
31.693548
92
py
models
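A minimal sketch (the context shapes and the sampler are assumptions, not code from this repository) of how timer_context_fn above is typically driven: the last context slot holds a countdown, a fresh goal is sampled whenever it reaches zero, and the countdown is reset to `period`, giving an effective resampling interval of period + 1 steps.

import tensorflow as tf
from context import context_transition_functions as ctf

goal = tf.Variable(tf.zeros([2]), name='goal_context')
timer = tf.Variable(tf.zeros([1]), name='timer_context')
step = tf.Variable(0, dtype=tf.int32, name='step')

def sample_new_contexts():
  # Hypothetical sampler: a fresh random goal; the timer slot is overwritten
  # with `period` by timer_context_fn itself.
  return [tf.random_uniform([2], -1.0, 1.0), tf.zeros([1])]

new_values = ctf.timer_context_fn(
    [goal, timer], step, sample_new_contexts, period=10, timer_index=-1)
update_ops = [tf.assign(var, value)
              for var, value in zip([goal, timer], new_values)]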
models-master/research/efficient-hrl/context/gin_imports.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import gin configurable modules.
"""
# pylint: disable=unused-import
from context import context
from context import context_transition_functions
from context import gin_utils
from context import rewards_functions
from context import samplers
# pylint: disable=unused-import
968
36.269231
80
py
models
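The module above exists purely for its import side effects: importing it registers the context, sampler, and reward functions with gin so that a config can bind their parameters. A small sketch follows; the specific bindings are hypothetical.

import gin.tf
from context import gin_imports  # pylint: disable=unused-import

gin.parse_config("""
timer_context_fn.period = 10
negative_distance.norm = 'L2'
""")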
models-master/research/efficient-hrl/context/rewards_functions.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reward shaping functions used by Contexts. Each reward function should take the following inputs and return new rewards, and discounts. new_rewards, discounts = reward_fn(states, actions, rewards, next_states, contexts) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import gin.tf def summarize_stats(stats): """Summarize a dictionary of variables. Args: stats: a dictionary of {name: tensor} to compute stats over. """ for name, stat in stats.items(): mean = tf.reduce_mean(stat) tf.summary.scalar('mean_%s' % name, mean) tf.summary.scalar('max_%s' % name, tf.reduce_max(stat)) tf.summary.scalar('min_%s' % name, tf.reduce_min(stat)) std = tf.sqrt(tf.reduce_mean(tf.square(stat)) - tf.square(mean) + 1e-10) tf.summary.scalar('std_%s' % name, std) tf.summary.histogram(name, stat) def index_states(states, indices): """Return indexed states. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. indices: (a list of Numpy integer array) Indices of states dimensions to be mapped. Returns: A [batch_size, num_indices] Tensor representing the batch of indexed states. """ if indices is None: return states indices = tf.constant(indices, dtype=tf.int32) return tf.gather(states, indices=indices, axis=1) def record_tensor(tensor, indices, stats, name='states'): """Record specified tensor dimensions into stats. Args: tensor: A [batch_size, num_dims] Tensor. indices: (a list of integers) Indices of dimensions to record. stats: A dictionary holding stats. name: (string) Name of tensor. """ if indices is None: indices = range(tensor.shape.as_list()[1]) for index in indices: stats['%s_%02d' % (name, index)] = tensor[:, index] @gin.configurable def potential_rewards(states, actions, rewards, next_states, contexts, gamma=1.0, reward_fn=None): """Return the potential-based rewards. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. gamma: Reward discount. reward_fn: A reward function. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. 
""" del actions # unused args gamma = tf.to_float(gamma) rewards_tp1, discounts = reward_fn(None, None, rewards, next_states, contexts) rewards, _ = reward_fn(None, None, rewards, states, contexts) return -rewards + gamma * rewards_tp1, discounts @gin.configurable def timed_rewards(states, actions, rewards, next_states, contexts, reward_fn=None, dense=False, timer_index=-1): """Return the timed rewards. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. reward_fn: A reward function. dense: (boolean) Provide dense rewards or sparse rewards at time = 0. timer_index: (integer) The context list index that specifies timer. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ assert contexts[timer_index].get_shape().as_list()[1] == 1 timers = contexts[timer_index][:, 0] rewards, discounts = reward_fn(states, actions, rewards, next_states, contexts) terminates = tf.to_float(timers <= 0) # if terminate set 1, else set 0 for _ in range(rewards.shape.ndims - 1): terminates = tf.expand_dims(terminates, axis=-1) if not dense: rewards *= terminates # if terminate, return rewards, else return 0 discounts *= (tf.to_float(1.0) - terminates) return rewards, discounts @gin.configurable def reset_rewards(states, actions, rewards, next_states, contexts, reset_index=0, reset_state=None, reset_reward_function=None, include_forward_rewards=True, include_reset_rewards=True): """Returns the rewards for a forward/reset agent. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. reset_index: (integer) The context list index that specifies reset. reset_state: Reset state. reset_reward_function: Reward function for reset step. include_forward_rewards: Include the rewards from the forward pass. include_reset_rewards: Include the rewards from the reset pass. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ reset_state = tf.constant( reset_state, dtype=next_states.dtype, shape=next_states.shape) reset_states = tf.expand_dims(reset_state, 0) def true_fn(): if include_reset_rewards: return reset_reward_function(states, actions, rewards, next_states, [reset_states] + contexts[1:]) else: return tf.zeros_like(rewards), tf.ones_like(rewards) def false_fn(): if include_forward_rewards: return plain_rewards(states, actions, rewards, next_states, contexts) else: return tf.zeros_like(rewards), tf.ones_like(rewards) rewards, discounts = tf.cond( tf.cast(contexts[reset_index][0, 0], dtype=tf.bool), true_fn, false_fn) return rewards, discounts @gin.configurable def tanh_similarity(states, actions, rewards, next_states, contexts, mse_scale=1.0, state_scales=1.0, goal_scales=1.0, summarize=False): """Returns the similarity between next_states and contexts using tanh and mse. 
Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. mse_scale: A float, to scale mse before tanh. state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, must be broadcastable to number of state dimensions. goal_scales: multiplicative scale for contexts. A scalar or 1D tensor, must be broadcastable to number of goal dimensions. summarize: (boolean) enable summary ops. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, actions, rewards # Unused mse = tf.reduce_mean(tf.squared_difference(next_states * state_scales, contexts[0] * goal_scales), -1) tanh = tf.tanh(mse_scale * mse) if summarize: with tf.name_scope('RewardFn/'): tf.summary.scalar('mean_mse', tf.reduce_mean(mse)) tf.summary.histogram('mse', mse) tf.summary.scalar('mean_tanh', tf.reduce_mean(tanh)) tf.summary.histogram('tanh', tanh) rewards = tf.to_float(1 - tanh) return rewards, tf.ones_like(rewards) @gin.configurable def negative_mse(states, actions, rewards, next_states, contexts, state_scales=1.0, goal_scales=1.0, summarize=False): """Returns the negative mean square error between next_states and contexts. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, must be broadcastable to number of state dimensions. goal_scales: multiplicative scale for contexts. A scalar or 1D tensor, must be broadcastable to number of goal dimensions. summarize: (boolean) enable summary ops. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, actions, rewards # Unused mse = tf.reduce_mean(tf.squared_difference(next_states * state_scales, contexts[0] * goal_scales), -1) if summarize: with tf.name_scope('RewardFn/'): tf.summary.scalar('mean_mse', tf.reduce_mean(mse)) tf.summary.histogram('mse', mse) rewards = tf.to_float(-mse) return rewards, tf.ones_like(rewards) @gin.configurable def negative_distance(states, actions, rewards, next_states, contexts, state_scales=1.0, goal_scales=1.0, reward_scales=1.0, weight_index=None, weight_vector=None, summarize=False, termination_epsilon=1e-4, state_indices=None, goal_indices=None, vectorize=False, relative_context=False, diff=False, norm='L2', epsilon=1e-10, bonus_epsilon=0., #5., offset=0.0): """Returns the negative euclidean distance between next_states and contexts. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. 
state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, must be broadcastable to number of state dimensions. goal_scales: multiplicative scale for goals. A scalar or 1D tensor, must be broadcastable to number of goal dimensions. reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, must be broadcastable to number of reward dimensions. weight_index: (integer) The context list index that specifies weight. weight_vector: (a number or a list or Numpy array) The weighting vector, broadcastable to `next_states`. summarize: (boolean) enable summary ops. termination_epsilon: terminate if dist is less than this quantity. state_indices: (a list of integers) list of state indices to select. goal_indices: (a list of integers) list of goal indices to select. vectorize: Return a vectorized form. norm: L1 or L2. epsilon: small offset to ensure non-negative/zero distance. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del actions, rewards # Unused stats = {} record_tensor(next_states, state_indices, stats, 'next_states') states = index_states(states, state_indices) next_states = index_states(next_states, state_indices) goals = index_states(contexts[0], goal_indices) if relative_context: goals = states + goals sq_dists = tf.squared_difference(next_states * state_scales, goals * goal_scales) old_sq_dists = tf.squared_difference(states * state_scales, goals * goal_scales) record_tensor(sq_dists, None, stats, 'sq_dists') if weight_vector is not None: sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) old_sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) if weight_index is not None: #sq_dists *= contexts[weight_index] weights = tf.abs(index_states(contexts[0], weight_index)) #weights /= tf.reduce_sum(weights, -1, keepdims=True) sq_dists *= weights old_sq_dists *= weights if norm == 'L1': dist = tf.sqrt(sq_dists + epsilon) old_dist = tf.sqrt(old_sq_dists + epsilon) if not vectorize: dist = tf.reduce_sum(dist, -1) old_dist = tf.reduce_sum(old_dist, -1) elif norm == 'L2': if vectorize: dist = sq_dists old_dist = old_sq_dists else: dist = tf.reduce_sum(sq_dists, -1) old_dist = tf.reduce_sum(old_sq_dists, -1) dist = tf.sqrt(dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) old_dist = tf.sqrt(old_dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) else: raise NotImplementedError(norm) discounts = dist > termination_epsilon if summarize: with tf.name_scope('RewardFn/'): tf.summary.scalar('mean_dist', tf.reduce_mean(dist)) tf.summary.histogram('dist', dist) summarize_stats(stats) bonus = tf.to_float(dist < bonus_epsilon) dist *= reward_scales old_dist *= reward_scales if diff: return bonus + offset + tf.to_float(old_dist - dist), tf.to_float(discounts) return bonus + offset + tf.to_float(-dist), tf.to_float(discounts) @gin.configurable def cosine_similarity(states, actions, rewards, next_states, contexts, state_scales=1.0, goal_scales=1.0, reward_scales=1.0, normalize_states=True, normalize_goals=True, weight_index=None, weight_vector=None, summarize=False, state_indices=None, goal_indices=None, offset=0.0): """Returns the cosine similarity between next_states - states and contexts. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. 
next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, must be broadcastable to number of state dimensions. goal_scales: multiplicative scale for goals. A scalar or 1D tensor, must be broadcastable to number of goal dimensions. reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, must be broadcastable to number of reward dimensions. weight_index: (integer) The context list index that specifies weight. weight_vector: (a number or a list or Numpy array) The weighting vector, broadcastable to `next_states`. summarize: (boolean) enable summary ops. termination_epsilon: terminate if dist is less than this quantity. state_indices: (a list of integers) list of state indices to select. goal_indices: (a list of integers) list of goal indices to select. vectorize: Return a vectorized form. norm: L1 or L2. epsilon: small offset to ensure non-negative/zero distance. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del actions, rewards # Unused stats = {} record_tensor(next_states, state_indices, stats, 'next_states') states = index_states(states, state_indices) next_states = index_states(next_states, state_indices) goals = index_states(contexts[0], goal_indices) if weight_vector is not None: goals *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) if weight_index is not None: weights = tf.abs(index_states(contexts[0], weight_index)) goals *= weights direction_vec = next_states - states if normalize_states: direction_vec = tf.nn.l2_normalize(direction_vec, -1) goal_vec = goals if normalize_goals: goal_vec = tf.nn.l2_normalize(goal_vec, -1) similarity = tf.reduce_sum(goal_vec * direction_vec, -1) discounts = tf.ones_like(similarity) return offset + tf.to_float(similarity), tf.to_float(discounts) @gin.configurable def diff_distance(states, actions, rewards, next_states, contexts, state_scales=1.0, goal_scales=1.0, reward_scales=1.0, weight_index=None, weight_vector=None, summarize=False, termination_epsilon=1e-4, state_indices=None, goal_indices=None, norm='L2', epsilon=1e-10): """Returns the difference in euclidean distance between states/next_states and contexts. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. state_scales: multiplicative scale for (next) states. A scalar or 1D tensor, must be broadcastable to number of state dimensions. goal_scales: multiplicative scale for goals. A scalar or 1D tensor, must be broadcastable to number of goal dimensions. reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, must be broadcastable to number of reward dimensions. weight_index: (integer) The context list index that specifies weight. weight_vector: (a number or a list or Numpy array) The weighting vector, broadcastable to `next_states`. summarize: (boolean) enable summary ops. termination_epsilon: terminate if dist is less than this quantity. state_indices: (a list of integers) list of state indices to select. 
goal_indices: (a list of integers) list of goal indices to select. vectorize: Return a vectorized form. norm: L1 or L2. epsilon: small offset to ensure non-negative/zero distance. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del actions, rewards # Unused stats = {} record_tensor(next_states, state_indices, stats, 'next_states') next_states = index_states(next_states, state_indices) states = index_states(states, state_indices) goals = index_states(contexts[0], goal_indices) next_sq_dists = tf.squared_difference(next_states * state_scales, goals * goal_scales) sq_dists = tf.squared_difference(states * state_scales, goals * goal_scales) record_tensor(sq_dists, None, stats, 'sq_dists') if weight_vector is not None: next_sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) sq_dists *= tf.convert_to_tensor(weight_vector, dtype=next_states.dtype) if weight_index is not None: next_sq_dists *= contexts[weight_index] sq_dists *= contexts[weight_index] if norm == 'L1': next_dist = tf.sqrt(next_sq_dists + epsilon) dist = tf.sqrt(sq_dists + epsilon) next_dist = tf.reduce_sum(next_dist, -1) dist = tf.reduce_sum(dist, -1) elif norm == 'L2': next_dist = tf.reduce_sum(next_sq_dists, -1) next_dist = tf.sqrt(next_dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) dist = tf.reduce_sum(sq_dists, -1) dist = tf.sqrt(dist + epsilon) # tf.gradients fails when tf.sqrt(-0.0) else: raise NotImplementedError(norm) discounts = next_dist > termination_epsilon if summarize: with tf.name_scope('RewardFn/'): tf.summary.scalar('mean_dist', tf.reduce_mean(dist)) tf.summary.histogram('dist', dist) summarize_stats(stats) diff = dist - next_dist diff *= reward_scales return tf.to_float(diff), tf.to_float(discounts) @gin.configurable def binary_indicator(states, actions, rewards, next_states, contexts, termination_epsilon=1e-4, offset=0, epsilon=1e-10, state_indices=None, summarize=False): """Returns 0/1 by checking if next_states and contexts overlap. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. termination_epsilon: terminate if dist is less than this quantity. offset: Offset the rewards. epsilon: small offset to ensure non-negative/zero distance. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, actions # unused args next_states = index_states(next_states, state_indices) dist = tf.reduce_sum(tf.squared_difference(next_states, contexts[0]), -1) dist = tf.sqrt(dist + epsilon) discounts = dist > termination_epsilon rewards = tf.logical_not(discounts) rewards = tf.to_float(rewards) + offset return tf.to_float(rewards), tf.ones_like(tf.to_float(discounts)) #tf.to_float(discounts) @gin.configurable def plain_rewards(states, actions, rewards, next_states, contexts): """Returns the given rewards. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. 
contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, actions, next_states, contexts # Unused return rewards, tf.ones_like(rewards) @gin.configurable def ctrl_rewards(states, actions, rewards, next_states, contexts, reward_scales=1.0): """Returns the negative control cost. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. reward_scales: multiplicative scale for rewards. A scalar or 1D tensor, must be broadcastable to number of reward dimensions. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, rewards, contexts # Unused if actions is None: rewards = tf.to_float(tf.zeros(shape=next_states.shape[:1])) else: rewards = -tf.reduce_sum(tf.square(actions), axis=1) rewards *= reward_scales rewards = tf.to_float(rewards) return rewards, tf.ones_like(rewards) @gin.configurable def diff_rewards( states, actions, rewards, next_states, contexts, state_indices=None, goal_index=0,): """Returns (next_states - goals) as a batched vector reward.""" del states, rewards, actions # Unused if state_indices is not None: next_states = index_states(next_states, state_indices) rewards = tf.to_float(next_states - contexts[goal_index]) return rewards, tf.ones_like(rewards) @gin.configurable def state_rewards(states, actions, rewards, next_states, contexts, weight_index=None, state_indices=None, weight_vector=1.0, offset_vector=0.0, summarize=False): """Returns the rewards that are linear mapping of next_states. Args: states: A [batch_size, num_state_dims] Tensor representing a batch of states. actions: A [batch_size, num_action_dims] Tensor representing a batch of actions. rewards: A [batch_size] Tensor representing a batch of rewards. next_states: A [batch_size, num_state_dims] Tensor representing a batch of next states. contexts: A list of [batch_size, num_context_dims] Tensor representing a batch of contexts. weight_index: (integer) Index of contexts lists that specify weighting. state_indices: (a list of Numpy integer array) Indices of states dimensions to be mapped. weight_vector: (a number or a list or Numpy array) The weighting vector, broadcastable to `next_states`. offset_vector: (a number or a list of Numpy array) The off vector. summarize: (boolean) enable summary ops. Returns: A new tf.float32 [batch_size] rewards Tensor, and tf.float32 [batch_size] discounts tensor. """ del states, actions, rewards # unused args stats = {} record_tensor(next_states, state_indices, stats) next_states = index_states(next_states, state_indices) weight = tf.constant( weight_vector, dtype=next_states.dtype, shape=next_states[0].shape) weights = tf.expand_dims(weight, 0) offset = tf.constant( offset_vector, dtype=next_states.dtype, shape=next_states[0].shape) offsets = tf.expand_dims(offset, 0) if weight_index is not None: weights *= contexts[weight_index] rewards = tf.to_float(tf.reduce_sum(weights * (next_states+offsets), axis=1)) if summarize: with tf.name_scope('RewardFn/'): summarize_stats(stats) return rewards, tf.ones_like(rewards)
28,732
37.72372
91
py
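To make the reward interface above concrete, here is a minimal sketch of calling one of these functions on toy tensors. The batch shapes, the values, and the `from context import rewards_functions` import path are illustrative assumptions; only the call signature and the returned (rewards, discounts) pair come from the code above.

import tensorflow as tf

from context import rewards_functions  # assumed import path

# Toy batch of 2 transitions with 3 state dimensions; contexts[0] is the goal.
states = tf.constant([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
actions = tf.constant([[0.1], [0.2]])
rewards = tf.constant([0.0, 0.0])
next_states = tf.constant([[0.5, 0.0, 0.0], [1.0, 2.0, 1.0]])
contexts = [tf.constant([[1.0, 0.0, 0.0], [1.0, 2.0, 1.0]])]

# negative_distance returns (bonus + offset - ||next_state - goal||, discount),
# with the discount going to 0 once the distance falls under the termination
# threshold.
new_rewards, discounts = rewards_functions.negative_distance(
    states, actions, rewards, next_states, contexts,
    diff=False, offset=0.0)

with tf.Session() as sess:
  print(sess.run([new_rewards, discounts]))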
models
models-master/research/efficient-hrl/context/gin_utils.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gin configurable utility functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import gin.tf @gin.configurable def gin_sparse_array(size, values, indices, fill_value=0): arr = np.zeros(size) arr.fill(fill_value) arr[indices] = values return arr @gin.configurable def gin_sum(values): result = values[0] for value in values[1:]: result += value return result @gin.configurable def gin_range(n): return range(n)
1,219
25.521739
80
py
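These helpers are gin-configurable so that config files can build constants from them; a quick sketch of what they compute when called directly (the values are made up and the import path is an assumption):

from context import gin_utils  # assumed import path

# A length-6 array that is fill_value everywhere except the given indices.
arr = gin_utils.gin_sparse_array(size=6, values=[2.0, -1.0], indices=[1, 4])
print(arr)  # [ 0.  2.  0.  0. -1.  0.]

# Sum of a list of scalar (or array-valued) entries.
print(gin_utils.gin_sum([1.0, 2.5, 3.5]))  # 7.0

# range() wrapped so it can be referenced from gin configs.
print(list(gin_utils.gin_range(3)))  # [0, 1, 2]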
models
models-master/research/efficient-hrl/context/__init__.py
1
0
0
py
models
models-master/research/efficient-hrl/context/samplers.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Samplers for Contexts. Each sampler class should define __call__(batch_size). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf slim = tf.contrib.slim import gin.tf @gin.configurable class BaseSampler(object): """Base sampler.""" def __init__(self, context_spec, context_range=None, k=2, scope='sampler'): """Construct a base sampler. Args: context_spec: A context spec. context_range: A tuple of (minval, max), where minval, maxval are floats or Numpy arrays with the same shape as the context. scope: A string denoting scope. """ self._context_spec = context_spec self._context_range = context_range self._k = k self._scope = scope def __call__(self, batch_size, **kwargs): raise NotImplementedError def set_replay(self, replay=None): pass def _validate_contexts(self, contexts): """Validate if contexts have right spec. Args: contexts: A [batch_size, num_contexts_dim] tensor. Raises: ValueError: If shape or dtype mismatches that of spec. """ if contexts[0].shape != self._context_spec.shape: raise ValueError('contexts has invalid shape %s wrt spec shape %s' % (contexts[0].shape, self._context_spec.shape)) if contexts.dtype != self._context_spec.dtype: raise ValueError('contexts has invalid dtype %s wrt spec dtype %s' % (contexts.dtype, self._context_spec.dtype)) @gin.configurable class ZeroSampler(BaseSampler): """Zero sampler.""" def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. """ contexts = tf.zeros( dtype=self._context_spec.dtype, shape=[ batch_size, ] + self._context_spec.shape.as_list()) return contexts, contexts @gin.configurable class BinarySampler(BaseSampler): """Binary sampler.""" def __init__(self, probs=0.5, *args, **kwargs): """Constructor.""" super(BinarySampler, self).__init__(*args, **kwargs) self._probs = probs def __call__(self, batch_size, **kwargs): """Sample a batch of context.""" spec = self._context_spec contexts = tf.random_uniform( shape=[ batch_size, ] + spec.shape.as_list(), dtype=tf.float32) contexts = tf.cast(tf.greater(contexts, self._probs), dtype=spec.dtype) return contexts, contexts @gin.configurable class RandomSampler(BaseSampler): """Random sampler.""" def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. 
""" spec = self._context_spec context_range = self._context_range if isinstance(context_range[0], (int, float)): contexts = tf.random_uniform( shape=[ batch_size, ] + spec.shape.as_list(), minval=context_range[0], maxval=context_range[1], dtype=spec.dtype) elif isinstance(context_range[0], (list, tuple, np.ndarray)): assert len(spec.shape.as_list()) == 1 assert spec.shape.as_list()[0] == len(context_range[0]) assert spec.shape.as_list()[0] == len(context_range[1]) contexts = tf.concat( [ tf.random_uniform( shape=[ batch_size, 1, ] + spec.shape.as_list()[1:], minval=context_range[0][i], maxval=context_range[1][i], dtype=spec.dtype) for i in range(spec.shape.as_list()[0]) ], axis=1) else: raise NotImplementedError(context_range) self._validate_contexts(contexts) state, next_state = kwargs['state'], kwargs['next_state'] if state is not None and next_state is not None: pass #contexts = tf.concat( # [tf.random_normal(tf.shape(state[:, :self._k]), dtype=tf.float64) + # tf.random_shuffle(state[:, :self._k]), # contexts[:, self._k:]], 1) return contexts, contexts @gin.configurable class ScheduledSampler(BaseSampler): """Scheduled sampler.""" def __init__(self, scope='default', values=None, scheduler='cycle', scheduler_params=None, *args, **kwargs): """Construct sampler. Args: scope: Scope name. values: A list of numbers or [num_context_dim] Numpy arrays representing the values to cycle. scheduler: scheduler type. scheduler_params: scheduler parameters. *args: arguments. **kwargs: keyword arguments. """ super(ScheduledSampler, self).__init__(*args, **kwargs) self._scope = scope self._values = values self._scheduler = scheduler self._scheduler_params = scheduler_params or {} assert self._values is not None and len( self._values), 'must provide non-empty values.' self._n = len(self._values) # TODO(shanegu): move variable creation outside. resolve tf.cond problem. self._count = 0 self._i = tf.Variable( tf.zeros(shape=(), dtype=tf.int32), name='%s-scheduled_sampler_%d' % (self._scope, self._count)) self._values = tf.constant(self._values, dtype=self._context_spec.dtype) def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. """ spec = self._context_spec next_op = self._next(self._i) with tf.control_dependencies([next_op]): value = self._values[self._i] if value.get_shape().as_list(): values = tf.tile( tf.expand_dims(value, 0), (batch_size,) + (1,) * spec.shape.ndims) else: values = value + tf.zeros( shape=[ batch_size, ] + spec.shape.as_list(), dtype=spec.dtype) self._validate_contexts(values) self._count += 1 return values, values def _next(self, i): """Return op that increments pointer to next value. Args: i: A tensorflow integer variable. Returns: Op that increments pointer. """ if self._scheduler == 'cycle': inc = ('inc' in self._scheduler_params and self._scheduler_params['inc']) or 1 return tf.assign(i, tf.mod(i+inc, self._n)) else: raise NotImplementedError(self._scheduler) @gin.configurable class ReplaySampler(BaseSampler): """Replay sampler.""" def __init__(self, prefetch_queue_capacity=2, override_indices=None, state_indices=None, *args, **kwargs): """Construct sampler. Args: prefetch_queue_capacity: Capacity for prefetch queue. override_indices: Override indices. state_indices: Select certain indices from state dimension. *args: arguments. **kwargs: keyword arguments. 
""" super(ReplaySampler, self).__init__(*args, **kwargs) self._prefetch_queue_capacity = prefetch_queue_capacity self._override_indices = override_indices self._state_indices = state_indices def set_replay(self, replay): """Set replay. Args: replay: A replay buffer. """ self._replay = replay def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. """ batch = self._replay.GetRandomBatch(batch_size) next_states = batch[4] if self._prefetch_queue_capacity > 0: batch_queue = slim.prefetch_queue.prefetch_queue( [next_states], capacity=self._prefetch_queue_capacity, name='%s/batch_context_queue' % self._scope) next_states = batch_queue.dequeue() if self._override_indices is not None: assert self._context_range is not None and isinstance( self._context_range[0], (int, long, float)) next_states = tf.concat( [ tf.random_uniform( shape=next_states[:, :1].shape, minval=self._context_range[0], maxval=self._context_range[1], dtype=next_states.dtype) if i in self._override_indices else next_states[:, i:i + 1] for i in range(self._context_spec.shape.as_list()[0]) ], axis=1) if self._state_indices is not None: next_states = tf.concat( [ next_states[:, i:i + 1] for i in range(self._context_spec.shape.as_list()[0]) ], axis=1) self._validate_contexts(next_states) return next_states, next_states @gin.configurable class TimeSampler(BaseSampler): """Time Sampler.""" def __init__(self, minval=0, maxval=1, timestep=-1, *args, **kwargs): """Construct sampler. Args: minval: Min value integer. maxval: Max value integer. timestep: Time step between states and next_states. *args: arguments. **kwargs: keyword arguments. """ super(TimeSampler, self).__init__(*args, **kwargs) assert self._context_spec.shape.as_list() == [1] self._minval = minval self._maxval = maxval self._timestep = timestep def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. """ if self._maxval == self._minval: contexts = tf.constant( self._maxval, shape=[batch_size, 1], dtype=tf.int32) else: contexts = tf.random_uniform( shape=[batch_size, 1], dtype=tf.int32, maxval=self._maxval, minval=self._minval) next_contexts = tf.maximum(contexts + self._timestep, 0) return tf.cast( contexts, dtype=self._context_spec.dtype), tf.cast( next_contexts, dtype=self._context_spec.dtype) @gin.configurable class ConstantSampler(BaseSampler): """Constant sampler.""" def __init__(self, value=None, *args, **kwargs): """Construct sampler. Args: value: A list or Numpy array for values of the constant. *args: arguments. **kwargs: keyword arguments. """ super(ConstantSampler, self).__init__(*args, **kwargs) self._value = value def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. """ spec = self._context_spec value_ = tf.constant(self._value, shape=spec.shape, dtype=spec.dtype) values = tf.tile( tf.expand_dims(value_, 0), (batch_size,) + (1,) * spec.shape.ndims) self._validate_contexts(values) return values, values @gin.configurable class DirectionSampler(RandomSampler): """Direction sampler.""" def __call__(self, batch_size, **kwargs): """Sample a batch of context. Args: batch_size: Batch size. Returns: Two [batch_size, num_context_dims] tensors. 
""" spec = self._context_spec context_range = self._context_range if isinstance(context_range[0], (int, float)): contexts = tf.random_uniform( shape=[ batch_size, ] + spec.shape.as_list(), minval=context_range[0], maxval=context_range[1], dtype=spec.dtype) elif isinstance(context_range[0], (list, tuple, np.ndarray)): assert len(spec.shape.as_list()) == 1 assert spec.shape.as_list()[0] == len(context_range[0]) assert spec.shape.as_list()[0] == len(context_range[1]) contexts = tf.concat( [ tf.random_uniform( shape=[ batch_size, 1, ] + spec.shape.as_list()[1:], minval=context_range[0][i], maxval=context_range[1][i], dtype=spec.dtype) for i in range(spec.shape.as_list()[0]) ], axis=1) else: raise NotImplementedError(context_range) self._validate_contexts(contexts) if 'sampler_fn' in kwargs: other_contexts = kwargs['sampler_fn']() else: other_contexts = contexts state, next_state = kwargs['state'], kwargs['next_state'] if state is not None and next_state is not None: my_context_range = (np.array(context_range[1]) - np.array(context_range[0])) / 2 * np.ones(spec.shape.as_list()) contexts = tf.concat( [0.1 * my_context_range[:self._k] * tf.random_normal(tf.shape(state[:, :self._k]), dtype=state.dtype) + tf.random_shuffle(state[:, :self._k]) - state[:, :self._k], other_contexts[:, self._k:]], 1) #contexts = tf.Print(contexts, # [contexts, tf.reduce_max(contexts, 0), # tf.reduce_min(state, 0), tf.reduce_max(state, 0)], 'contexts', summarize=15) next_contexts = tf.concat( #LALA [state[:, :self._k] + contexts[:, :self._k] - next_state[:, :self._k], other_contexts[:, self._k:]], 1) next_contexts = contexts #LALA cosine else: next_contexts = contexts return tf.stop_gradient(contexts), tf.stop_gradient(next_contexts)
14,171
30.775785
118
py
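A hedged sketch of constructing two of the samplers above. The TensorSpec comes from tf_agents (as in utils.py later in this listing), and passing state=None and next_state=None mirrors how RandomSampler reads those keyword arguments; the import path and the goal ranges are assumptions.

import tensorflow as tf
from tf_agents import specs

from context import samplers  # assumed import path

context_spec = specs.TensorSpec(shape=(2,), dtype=tf.float32, name='goal')

# Goals drawn uniformly from [-4, 4] in each dimension.
random_sampler = samplers.RandomSampler(
    context_spec=context_spec, context_range=(-4.0, 4.0))
contexts, next_contexts = random_sampler(
    batch_size=8, state=None, next_state=None)  # both are [8, 2] tensors

# A fixed goal tiled across the batch.
constant_sampler = samplers.ConstantSampler(
    value=[3.0, 1.0], context_spec=context_spec)
values, _ = constant_sampler(batch_size=8)  # [8, 2], every row is [3.0, 1.0]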
models
models-master/research/efficient-hrl/utils/utils.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow utility functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from copy import deepcopy import tensorflow as tf from tf_agents import specs from tf_agents.utils import common _tf_print_counts = dict() _tf_print_running_sums = dict() _tf_print_running_counts = dict() _tf_print_ids = 0 def get_contextual_env_base(env_base, begin_ops=None, end_ops=None): """Wrap env_base with additional tf ops.""" # pylint: disable=protected-access def init(self_, env_base): self_._env_base = env_base attribute_list = ["_render_mode", "_gym_env"] for attribute in attribute_list: if hasattr(env_base, attribute): setattr(self_, attribute, getattr(env_base, attribute)) if hasattr(env_base, "physics"): self_._physics = env_base.physics elif hasattr(env_base, "gym"): class Physics(object): def render(self, *args, **kwargs): return env_base.gym.render("rgb_array") physics = Physics() self_._physics = physics self_.physics = physics def set_sess(self_, sess): self_._sess = sess if hasattr(self_._env_base, "set_sess"): self_._env_base.set_sess(sess) def begin_episode(self_): self_._env_base.reset() if begin_ops is not None: self_._sess.run(begin_ops) def end_episode(self_): self_._env_base.reset() if end_ops is not None: self_._sess.run(end_ops) return type("ContextualEnvBase", (env_base.__class__,), dict( __init__=init, set_sess=set_sess, begin_episode=begin_episode, end_episode=end_episode, ))(env_base) # pylint: enable=protected-access def merge_specs(specs_): """Merge TensorSpecs. Args: specs_: List of TensorSpecs to be merged. Returns: a TensorSpec: a merged TensorSpec. """ shape = specs_[0].shape dtype = specs_[0].dtype name = specs_[0].name for spec in specs_[1:]: assert shape[1:] == spec.shape[1:], "incompatible shapes: %s, %s" % ( shape, spec.shape) assert dtype == spec.dtype, "incompatible dtypes: %s, %s" % ( dtype, spec.dtype) shape = merge_shapes((shape, spec.shape), axis=0) return specs.TensorSpec( shape=shape, dtype=dtype, name=name, ) def merge_shapes(shapes, axis=0): """Merge TensorShapes. Args: shapes: List of TensorShapes to be merged. axis: optional, the axis to merge shaped. Returns: a TensorShape: a merged TensorShape. """ assert len(shapes) > 1 dims = deepcopy(shapes[0].dims) for shape in shapes[1:]: assert shapes[0].ndims == shape.ndims dims[axis] += shape.dims[axis] return tf.TensorShape(dims=dims) def get_all_vars(ignore_scopes=None): """Get all tf variables in scope. Args: ignore_scopes: A list of scope names to ignore. Returns: A list of all tf variables in scope. 
""" all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) all_vars = [var for var in all_vars if ignore_scopes is None or not any(var.name.startswith(scope) for scope in ignore_scopes)] return all_vars def clip(tensor, range_=None): """Return a tf op which clips tensor according to range_. Args: tensor: A Tensor to be clipped. range_: None, or a tuple representing (minval, maxval) Returns: A clipped Tensor. """ if range_ is None: return tf.identity(tensor) elif isinstance(range_, (tuple, list)): assert len(range_) == 2 return tf.clip_by_value(tensor, range_[0], range_[1]) else: raise NotImplementedError("Unacceptable range input: %r" % range_) def clip_to_bounds(value, minimum, maximum): """Clips value to be between minimum and maximum. Args: value: (tensor) value to be clipped. minimum: (numpy float array) minimum value to clip to. maximum: (numpy float array) maximum value to clip to. Returns: clipped_value: (tensor) `value` clipped to between `minimum` and `maximum`. """ value = tf.minimum(value, maximum) return tf.maximum(value, minimum) clip_to_spec = common.clip_to_spec def _clip_to_spec(value, spec): """Clips value to a given bounded tensor spec. Args: value: (tensor) value to be clipped. spec: (BoundedTensorSpec) spec containing min. and max. values for clipping. Returns: clipped_value: (tensor) `value` clipped to be compatible with `spec`. """ return clip_to_bounds(value, spec.minimum, spec.maximum) join_scope = common.join_scope def _join_scope(parent_scope, child_scope): """Joins a parent and child scope using `/`, checking for empty/none. Args: parent_scope: (string) parent/prefix scope. child_scope: (string) child/suffix scope. Returns: joined scope: (string) parent and child scopes joined by /. """ if not parent_scope: return child_scope if not child_scope: return parent_scope return '/'.join([parent_scope, child_scope]) def assign_vars(vars_, values): """Returns the update ops for assigning a list of vars. Args: vars_: A list of variables. values: A list of tensors representing new values. Returns: A list of update ops for the variables. """ return [var.assign(value) for var, value in zip(vars_, values)] def identity_vars(vars_): """Return the identity ops for a list of tensors. Args: vars_: A list of tensors. Returns: A list of identity ops. """ return [tf.identity(var) for var in vars_] def tile(var, batch_size=1): """Return tiled tensor. Args: var: A tensor representing the state. batch_size: Batch size. Returns: A tensor with shape [batch_size,] + var.shape. """ batch_var = tf.tile( tf.expand_dims(var, 0), (batch_size,) + (1,) * var.get_shape().ndims) return batch_var def batch_list(vars_list): """Batch a list of variables. Args: vars_list: A list of tensor variables. Returns: A list of tensor variables with additional first dimension. """ return [tf.expand_dims(var, 0) for var in vars_list] def tf_print(op, tensors, message="", first_n=-1, name=None, sub_messages=None, print_freq=-1, include_count=True): """tf.Print, but to stdout.""" # TODO(shanegu): `name` is deprecated. Remove from the rest of codes. 
global _tf_print_ids _tf_print_ids += 1 name = _tf_print_ids _tf_print_counts[name] = 0 if print_freq > 0: _tf_print_running_sums[name] = [0 for _ in tensors] _tf_print_running_counts[name] = 0 def print_message(*xs): """print message fn.""" _tf_print_counts[name] += 1 if print_freq > 0: for i, x in enumerate(xs): _tf_print_running_sums[name][i] += x _tf_print_running_counts[name] += 1 if (print_freq <= 0 or _tf_print_running_counts[name] >= print_freq) and ( first_n < 0 or _tf_print_counts[name] <= first_n): for i, x in enumerate(xs): if print_freq > 0: del x x = _tf_print_running_sums[name][i]/_tf_print_running_counts[name] if sub_messages is None: sub_message = str(i) else: sub_message = sub_messages[i] log_message = "%s, %s" % (message, sub_message) if include_count: log_message += ", count=%d" % _tf_print_counts[name] tf.logging.info("[%s]: %s" % (log_message, x)) if print_freq > 0: for i, x in enumerate(xs): _tf_print_running_sums[name][i] = 0 _tf_print_running_counts[name] = 0 return xs[0] print_op = tf.py_func(print_message, tensors, tensors[0].dtype) with tf.control_dependencies([print_op]): op = tf.identity(op) return op periodically = common.periodically def _periodically(body, period, name='periodically'): """Periodically performs a tensorflow op.""" if period is None or period == 0: return tf.no_op() if period < 0: raise ValueError("period cannot be less than 0.") if period == 1: return body() with tf.variable_scope(None, default_name=name): counter = tf.get_variable( "counter", shape=[], dtype=tf.int64, trainable=False, initializer=tf.constant_initializer(period, dtype=tf.int64)) def _wrapped_body(): with tf.control_dependencies([body()]): return counter.assign(1) update = tf.cond( tf.equal(counter, period), _wrapped_body, lambda: counter.assign_add(1)) return update soft_variables_update = common.soft_variables_update
9,224
27.918495
80
py
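A few of the tensor helpers above used in isolation; the alias and import path are assumptions, while the behavior follows directly from the definitions.

import tensorflow as tf

from utils import utils as uvf_utils  # assumed import path

goal = tf.constant([1.0, 2.0, 3.0])

# tile() adds a leading batch dimension and repeats the value batch_size times.
batched_goal = uvf_utils.tile(goal, batch_size=4)          # shape [4, 3]

# clip() bounds a tensor to a (minval, maxval) range, or is a no-op for None.
clipped_goal = uvf_utils.clip(batched_goal, (-2.0, 2.0))   # values in [-2, 2]

# merge_shapes() concatenates TensorShapes along one axis, as merge_specs()
# does when stacking specs.
merged = uvf_utils.merge_shapes(
    [tf.TensorShape([4, 3]), tf.TensorShape([2, 3])], axis=0)
print(merged)  # (6, 3)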
models
models-master/research/efficient-hrl/utils/__init__.py
1
0
0
py
models
models-master/research/efficient-hrl/utils/eval_utils.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Evaluation utility functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np import tensorflow as tf from collections import namedtuple logging = tf.logging import gin.tf @gin.configurable def evaluate_checkpoint_repeatedly(checkpoint_dir, evaluate_checkpoint_fn, eval_interval_secs=600, max_number_of_evaluations=None, checkpoint_timeout=None, timeout_fn=None): """Evaluates a checkpointed model at a set interval.""" if max_number_of_evaluations is not None and max_number_of_evaluations <= 0: raise ValueError( '`max_number_of_evaluations` must be either None or a positive number.') number_of_evaluations = 0 for checkpoint_path in tf.contrib.training.checkpoints_iterator( checkpoint_dir, min_interval_secs=eval_interval_secs, timeout=checkpoint_timeout, timeout_fn=timeout_fn): retries = 3 for _ in range(retries): try: should_stop = evaluate_checkpoint_fn(checkpoint_path) break except tf.errors.DataLossError as e: logging.warn( 'Encountered a DataLossError while evaluating a checkpoint. This ' 'can happen when reading a checkpoint before it is fully written. ' 'Retrying...' ) time.sleep(2.0) def compute_model_loss(sess, model_rollout_fn, states, actions): """Computes model loss.""" preds, losses = [], [] preds.append(states[0]) losses.append(0) for state, action in zip(states[1:], actions[1:]): pred = model_rollout_fn(sess, preds[-1], action) loss = np.sqrt(np.sum((state - pred) ** 2)) preds.append(pred) losses.append(loss) return preds, losses def compute_average_reward(sess, env_base, step_fn, gamma, num_steps, num_episodes): """Computes the discounted reward for a given number of steps. Args: sess: The tensorflow session. env_base: A python environment. step_fn: A function that takes in `sess` and returns a list of [state, action, reward, discount, transition_type] values. gamma: discounting factor to apply to the reward. num_steps: number of steps to compute the reward over. num_episodes: number of episodes to average the reward over. Returns: average_reward: a scalar of discounted reward. last_reward: last reward received. """ average_reward = 0 average_last_reward = 0 average_meta_reward = 0 average_last_meta_reward = 0 average_success = 0.
states, actions = None, None for i in range(num_episodes): env_base.end_episode() env_base.begin_episode() (reward, last_reward, meta_reward, last_meta_reward, states, actions) = compute_reward( sess, step_fn, gamma, num_steps) s_reward = last_meta_reward # Navigation success = (s_reward > -5.0) # When using diff=False logging.info('Episode = %d, reward = %s, meta_reward = %f, ' 'last_reward = %s, last meta_reward = %f, success = %s', i, reward, meta_reward, last_reward, last_meta_reward, success) average_reward += reward average_last_reward += last_reward average_meta_reward += meta_reward average_last_meta_reward += last_meta_reward average_success += success average_reward /= num_episodes average_last_reward /= num_episodes average_meta_reward /= num_episodes average_last_meta_reward /= num_episodes average_success /= num_episodes return (average_reward, average_last_reward, average_meta_reward, average_last_meta_reward, average_success, states, actions) def compute_reward(sess, step_fn, gamma, num_steps): """Computes the discounted reward for a given number of steps. Args: sess: The tensorflow session. step_fn: A function that takes in `sess` and returns a list of [state, action, reward, discount, transition_type] values. gamma: discounting factor to apply to the reward. num_steps: number of steps to compute the reward over. Returns: reward: cumulative discounted reward. last_reward: reward received at final step. """ total_reward = 0 total_meta_reward = 0 gamma_step = 1 states = [] actions = [] for _ in range(num_steps): state, action, transition_type, reward, meta_reward, discount, _, _ = step_fn(sess) total_reward += reward * gamma_step * discount total_meta_reward += meta_reward * gamma_step * discount gamma_step *= gamma states.append(state) actions.append(action) return (total_reward, reward, total_meta_reward, meta_reward, states, actions)
5,521
35.328947
87
py
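For intuition about compute_reward above: it accumulates reward * gamma^t, additionally scaled by the per-step discount. A standalone numeric sketch of that accumulation with made-up rewards:

# Mirrors the accumulation loop in compute_reward, outside of TensorFlow.
gamma = 0.99
step_rewards = [1.0, 0.0, 2.0, 1.0]    # hypothetical per-step rewards
step_discounts = [1.0, 1.0, 1.0, 0.0]  # 0.0 marks a terminated transition

total_reward = 0.0
gamma_step = 1.0
for reward, discount in zip(step_rewards, step_discounts):
  total_reward += reward * gamma_step * discount
  gamma_step *= gamma

print(total_reward)  # 1.0 + 0.0 + (0.99 ** 2) * 2.0 = 2.9602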
models
models-master/research/efficient-hrl/environments/point_maze_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper combining the point robot with the maze environment.""" from environments.maze_env import MazeEnv from environments.point import PointEnv class PointMazeEnv(MazeEnv): MODEL_CLASS = PointEnv
829
36.727273
80
py
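PointMazeEnv only binds the point robot into the maze scaffolding from maze_env.py further down in this listing; a hedged construction sketch, where the maze_id value and keyword choices are assumptions about what maze_env_utils.construct_maze accepts:

from environments.point_maze_env import PointMazeEnv

# Builds the maze XML around the point robot defined in point.py.
env = PointMazeEnv(maze_id='Maze', maze_size_scaling=4, manual_collision=True)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())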
models
models-master/research/efficient-hrl/environments/ant.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper for creating the ant environment in gym_mujoco.""" import math import numpy as np import mujoco_py from gym import utils from gym.envs.mujoco import mujoco_env def q_inv(a): return [a[0], -a[1], -a[2], -a[3]] def q_mult(a, b): # multiply two quaternion w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3] i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2] j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1] k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0] return [w, i, j, k] class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle): FILE = "ant.xml" ORI_IND = 3 def __init__(self, file_path=None, expose_all_qpos=True, expose_body_coms=None, expose_body_comvels=None): self._expose_all_qpos = expose_all_qpos self._expose_body_coms = expose_body_coms self._expose_body_comvels = expose_body_comvels self._body_com_indices = {} self._body_comvel_indices = {} mujoco_env.MujocoEnv.__init__(self, file_path, 5) utils.EzPickle.__init__(self) @property def physics(self): # check mujoco version is greater than version 1.50 to call correct physics # model containing PyMjData object for getting and setting position/velocity # check https://github.com/openai/mujoco-py/issues/80 for updates to api if mujoco_py.get_version() >= '1.50': return self.sim else: return self.model def _step(self, a): return self.step(a) def step(self, a): xposbefore = self.get_body_com("torso")[0] self.do_simulation(a, self.frame_skip) xposafter = self.get_body_com("torso")[0] forward_reward = (xposafter - xposbefore) / self.dt ctrl_cost = .5 * np.square(a).sum() survive_reward = 1.0 reward = forward_reward - ctrl_cost + survive_reward state = self.state_vector() done = False ob = self._get_obs() return ob, reward, done, dict( reward_forward=forward_reward, reward_ctrl=-ctrl_cost, reward_survive=survive_reward) def _get_obs(self): # No cfrc observation if self._expose_all_qpos: obs = np.concatenate([ self.physics.data.qpos.flat[:15], # Ensures only ant obs. 
self.physics.data.qvel.flat[:14], ]) else: obs = np.concatenate([ self.physics.data.qpos.flat[2:15], self.physics.data.qvel.flat[:14], ]) if self._expose_body_coms is not None: for name in self._expose_body_coms: com = self.get_body_com(name) if name not in self._body_com_indices: indices = range(len(obs), len(obs) + len(com)) self._body_com_indices[name] = indices obs = np.concatenate([obs, com]) if self._expose_body_comvels is not None: for name in self._expose_body_comvels: comvel = self.get_body_comvel(name) if name not in self._body_comvel_indices: indices = range(len(obs), len(obs) + len(comvel)) self._body_comvel_indices[name] = indices obs = np.concatenate([obs, comvel]) return obs def reset_model(self): qpos = self.init_qpos + self.np_random.uniform( size=self.model.nq, low=-.1, high=.1) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 # Set everything other than ant to original position and 0 velocity. qpos[15:] = self.init_qpos[15:] qvel[14:] = 0. self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 def get_ori(self): ori = [0, 1, 0, 0] rot = self.physics.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4] # take the quaternion ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3] # project onto x-y plane ori = math.atan2(ori[1], ori[0]) return ori def set_xy(self, xy): qpos = np.copy(self.physics.data.qpos) qpos[0] = xy[0] qpos[1] = xy[1] qvel = self.physics.data.qvel self.set_state(qpos, qvel) def get_xy(self): return self.physics.data.qpos[:2]
4,728
32.302817
106
py
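get_ori above rotates the unit x-axis by the torso quaternion and reads the planar heading off the result; a small standalone check of that arithmetic, reusing the q_mult/q_inv definitions from ant.py:

import math

def q_inv(a):
  return [a[0], -a[1], -a[2], -a[3]]

def q_mult(a, b):  # Hamilton product of two quaternions [w, i, j, k].
  w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3]
  i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2]
  j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1]
  k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0]
  return [w, i, j, k]

# Quaternion for a 90 degree yaw about z: [cos(45deg), 0, 0, sin(45deg)].
rot = [math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)]
ori = [0, 1, 0, 0]  # unit x-axis as a pure quaternion
ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3]  # rotate, project onto x-y
print(math.atan2(ori[1], ori[0]))  # ~= pi / 2, i.e. facing along +y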
models
models-master/research/efficient-hrl/environments/point.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper for creating the ant environment in gym_mujoco.""" import math import numpy as np import mujoco_py from gym import utils from gym.envs.mujoco import mujoco_env class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle): FILE = "point.xml" ORI_IND = 2 def __init__(self, file_path=None, expose_all_qpos=True): self._expose_all_qpos = expose_all_qpos mujoco_env.MujocoEnv.__init__(self, file_path, 1) utils.EzPickle.__init__(self) @property def physics(self): # check mujoco version is greater than version 1.50 to call correct physics # model containing PyMjData object for getting and setting position/velocity # check https://github.com/openai/mujoco-py/issues/80 for updates to api if mujoco_py.get_version() >= '1.50': return self.sim else: return self.model def _step(self, a): return self.step(a) def step(self, action): action[0] = 0.2 * action[0] qpos = np.copy(self.physics.data.qpos) qpos[2] += action[1] ori = qpos[2] # compute increment in each direction dx = math.cos(ori) * action[0] dy = math.sin(ori) * action[0] # ensure that the robot is within reasonable range qpos[0] = np.clip(qpos[0] + dx, -100, 100) qpos[1] = np.clip(qpos[1] + dy, -100, 100) qvel = self.physics.data.qvel self.set_state(qpos, qvel) for _ in range(0, self.frame_skip): self.physics.step() next_obs = self._get_obs() reward = 0 done = False info = {} return next_obs, reward, done, info def _get_obs(self): if self._expose_all_qpos: return np.concatenate([ self.physics.data.qpos.flat[:3], # Only point-relevant coords. self.physics.data.qvel.flat[:3]]) return np.concatenate([ self.physics.data.qpos.flat[2:3], self.physics.data.qvel.flat[:3]]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform( size=self.physics.model.nq, low=-.1, high=.1) qvel = self.init_qvel + self.np_random.randn(self.physics.model.nv) * .1 # Set everything other than point to original position and 0 velocity. qpos[3:] = self.init_qpos[3:] qvel[3:] = 0. self.set_state(qpos, qvel) return self._get_obs() def get_ori(self): return self.physics.data.qpos[self.__class__.ORI_IND] def set_xy(self, xy): qpos = np.copy(self.physics.data.qpos) qpos[0] = xy[0] qpos[1] = xy[1] qvel = self.physics.data.qvel
3,150
31.153061
80
py
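The point robot's step above is simple unicycle-style kinematics: the first action component, rescaled by 0.2, is the forward displacement along the current heading, and the second increments the heading before the displacement is applied. A standalone numeric sketch of that update with made-up values:

import math
import numpy as np

qpos = np.array([0.0, 0.0, 0.0])        # x, y, heading
action = np.array([1.0, math.pi / 2])   # forward command, turn command

forward = 0.2 * action[0]               # action[0] is rescaled by 0.2
qpos[2] += action[1]                    # heading is updated first
dx = math.cos(qpos[2]) * forward
dy = math.sin(qpos[2]) * forward
qpos[0] = np.clip(qpos[0] + dx, -100, 100)  # same position bounds as PointEnv
qpos[1] = np.clip(qpos[1] + dy, -100, 100)

print(qpos)  # heading pi/2, so the robot moved ~0.2 along +y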
models
models-master/research/efficient-hrl/environments/maze_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adapted from rllab maze_env.py.""" import os import tempfile import xml.etree.ElementTree as ET import math import numpy as np import gym from environments import maze_env_utils # Directory that contains mujoco xml files. MODEL_DIR = 'environments/assets' class MazeEnv(gym.Env): MODEL_CLASS = None MAZE_HEIGHT = None MAZE_SIZE_SCALING = None def __init__( self, maze_id=None, maze_height=0.5, maze_size_scaling=8, n_bins=0, sensor_range=3., sensor_span=2 * math.pi, observe_blocks=False, put_spin_near_agent=False, top_down_view=False, manual_collision=False, *args, **kwargs): self._maze_id = maze_id model_cls = self.__class__.MODEL_CLASS if model_cls is None: raise "MODEL_CLASS unspecified!" xml_path = os.path.join(MODEL_DIR, model_cls.FILE) tree = ET.parse(xml_path) worldbody = tree.find(".//worldbody") self.MAZE_HEIGHT = height = maze_height self.MAZE_SIZE_SCALING = size_scaling = maze_size_scaling self._n_bins = n_bins self._sensor_range = sensor_range * size_scaling self._sensor_span = sensor_span self._observe_blocks = observe_blocks self._put_spin_near_agent = put_spin_near_agent self._top_down_view = top_down_view self._manual_collision = manual_collision self.MAZE_STRUCTURE = structure = maze_env_utils.construct_maze(maze_id=self._maze_id) self.elevated = any(-1 in row for row in structure) # Elevate the maze to allow for falling. self.blocks = any( any(maze_env_utils.can_move(r) for r in row) for row in structure) # Are there any movable blocks? torso_x, torso_y = self._find_robot() self._init_torso_x = torso_x self._init_torso_y = torso_y self._init_positions = [ (x - torso_x, y - torso_y) for x, y in self._find_all_robots()] self._xy_to_rowcol = lambda x, y: (2 + (y + size_scaling / 2) / size_scaling, 2 + (x + size_scaling / 2) / size_scaling) self._view = np.zeros([5, 5, 3]) # walls (immovable), chasms (fall), movable blocks height_offset = 0. if self.elevated: # Increase initial z-pos of ant. height_offset = height * size_scaling torso = tree.find(".//body[@name='torso']") torso.set('pos', '0 0 %.2f' % (0.75 + height_offset)) if self.blocks: # If there are movable blocks, change simulation settings to perform # better contact detection. default = tree.find(".//default") default.find('.//geom').set('solimp', '.995 .995 .01') self.movable_blocks = [] for i in range(len(structure)): for j in range(len(structure[0])): struct = structure[i][j] if struct == 'r' and self._put_spin_near_agent: struct = maze_env_utils.Move.SpinXY if self.elevated and struct not in [-1]: # Create elevated platform. 
ET.SubElement( worldbody, "geom", name="elevated_%d_%d" % (i, j), pos="%f %f %f" % (j * size_scaling - torso_x, i * size_scaling - torso_y, height / 2 * size_scaling), size="%f %f %f" % (0.5 * size_scaling, 0.5 * size_scaling, height / 2 * size_scaling), type="box", material="", contype="1", conaffinity="1", rgba="0.9 0.9 0.9 1", ) if struct == 1: # Unmovable block. # Offset all coordinates so that robot starts at the origin. ET.SubElement( worldbody, "geom", name="block_%d_%d" % (i, j), pos="%f %f %f" % (j * size_scaling - torso_x, i * size_scaling - torso_y, height_offset + height / 2 * size_scaling), size="%f %f %f" % (0.5 * size_scaling, 0.5 * size_scaling, height / 2 * size_scaling), type="box", material="", contype="1", conaffinity="1", rgba="0.4 0.4 0.4 1", ) elif maze_env_utils.can_move(struct): # Movable block. # The "falling" blocks are shrunk slightly and increased in mass to # ensure that it can fall easily through a gap in the platform blocks. name = "movable_%d_%d" % (i, j) self.movable_blocks.append((name, struct)) falling = maze_env_utils.can_move_z(struct) spinning = maze_env_utils.can_spin(struct) x_offset = 0.25 * size_scaling if spinning else 0.0 y_offset = 0.0 shrink = 0.1 if spinning else 0.99 if falling else 1.0 height_shrink = 0.1 if spinning else 1.0 movable_body = ET.SubElement( worldbody, "body", name=name, pos="%f %f %f" % (j * size_scaling - torso_x + x_offset, i * size_scaling - torso_y + y_offset, height_offset + height / 2 * size_scaling * height_shrink), ) ET.SubElement( movable_body, "geom", name="block_%d_%d" % (i, j), pos="0 0 0", size="%f %f %f" % (0.5 * size_scaling * shrink, 0.5 * size_scaling * shrink, height / 2 * size_scaling * height_shrink), type="box", material="", mass="0.001" if falling else "0.0002", contype="1", conaffinity="1", rgba="0.9 0.1 0.1 1" ) if maze_env_utils.can_move_x(struct): ET.SubElement( movable_body, "joint", armature="0", axis="1 0 0", damping="0.0", limited="true" if falling else "false", range="%f %f" % (-size_scaling, size_scaling), margin="0.01", name="movable_x_%d_%d" % (i, j), pos="0 0 0", type="slide" ) if maze_env_utils.can_move_y(struct): ET.SubElement( movable_body, "joint", armature="0", axis="0 1 0", damping="0.0", limited="true" if falling else "false", range="%f %f" % (-size_scaling, size_scaling), margin="0.01", name="movable_y_%d_%d" % (i, j), pos="0 0 0", type="slide" ) if maze_env_utils.can_move_z(struct): ET.SubElement( movable_body, "joint", armature="0", axis="0 0 1", damping="0.0", limited="true", range="%f 0" % (-height_offset), margin="0.01", name="movable_z_%d_%d" % (i, j), pos="0 0 0", type="slide" ) if maze_env_utils.can_spin(struct): ET.SubElement( movable_body, "joint", armature="0", axis="0 0 1", damping="0.0", limited="false", name="spinable_%d_%d" % (i, j), pos="0 0 0", type="ball" ) torso = tree.find(".//body[@name='torso']") geoms = torso.findall(".//geom") for geom in geoms: if 'name' not in geom.attrib: raise Exception("Every geom of the torso must have a name " "defined") _, file_path = tempfile.mkstemp(text=True, suffix='.xml') tree.write(file_path) self.wrapped_env = model_cls(*args, file_path=file_path, **kwargs) def get_ori(self): return self.wrapped_env.get_ori() def get_top_down_view(self): self._view = np.zeros_like(self._view) def valid(row, col): return self._view.shape[0] > row >= 0 and self._view.shape[1] > col >= 0 def update_view(x, y, d, row=None, col=None): if row is None or col is None: x = x - self._robot_x y = y - self._robot_y th = self._robot_ori row, col = 
self._xy_to_rowcol(x, y) update_view(x, y, d, row=row, col=col) return row, row_frac, col, col_frac = int(row), row % 1, int(col), col % 1 if row_frac < 0: row_frac += 1 if col_frac < 0: col_frac += 1 if valid(row, col): self._view[row, col, d] += ( (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) if valid(row - 1, col): self._view[row - 1, col, d] += ( (max(0., 0.5 - row_frac)) * (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) if valid(row + 1, col): self._view[row + 1, col, d] += ( (max(0., row_frac - 0.5)) * (min(1., col_frac + 0.5) - max(0., col_frac - 0.5))) if valid(row, col - 1): self._view[row, col - 1, d] += ( (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * (max(0., 0.5 - col_frac))) if valid(row, col + 1): self._view[row, col + 1, d] += ( (min(1., row_frac + 0.5) - max(0., row_frac - 0.5)) * (max(0., col_frac - 0.5))) if valid(row - 1, col - 1): self._view[row - 1, col - 1, d] += ( (max(0., 0.5 - row_frac)) * max(0., 0.5 - col_frac)) if valid(row - 1, col + 1): self._view[row - 1, col + 1, d] += ( (max(0., 0.5 - row_frac)) * max(0., col_frac - 0.5)) if valid(row + 1, col + 1): self._view[row + 1, col + 1, d] += ( (max(0., row_frac - 0.5)) * max(0., col_frac - 0.5)) if valid(row + 1, col - 1): self._view[row + 1, col - 1, d] += ( (max(0., row_frac - 0.5)) * max(0., 0.5 - col_frac)) # Draw ant. robot_x, robot_y = self.wrapped_env.get_body_com("torso")[:2] self._robot_x = robot_x self._robot_y = robot_y self._robot_ori = self.get_ori() structure = self.MAZE_STRUCTURE size_scaling = self.MAZE_SIZE_SCALING height = self.MAZE_HEIGHT # Draw immovable blocks and chasms. for i in range(len(structure)): for j in range(len(structure[0])): if structure[i][j] == 1: # Wall. update_view(j * size_scaling - self._init_torso_x, i * size_scaling - self._init_torso_y, 0) if structure[i][j] == -1: # Chasm. update_view(j * size_scaling - self._init_torso_x, i * size_scaling - self._init_torso_y, 1) # Draw movable blocks. for block_name, block_type in self.movable_blocks: block_x, block_y = self.wrapped_env.get_body_com(block_name)[:2] update_view(block_x, block_y, 2) return self._view def get_range_sensor_obs(self): """Returns egocentric range sensor observations of maze.""" robot_x, robot_y, robot_z = self.wrapped_env.get_body_com("torso")[:3] ori = self.get_ori() structure = self.MAZE_STRUCTURE size_scaling = self.MAZE_SIZE_SCALING height = self.MAZE_HEIGHT segments = [] # Get line segments (corresponding to outer boundary) of each immovable # block or drop-off. for i in range(len(structure)): for j in range(len(structure[0])): if structure[i][j] in [1, -1]: # There's a wall or drop-off. cx = j * size_scaling - self._init_torso_x cy = i * size_scaling - self._init_torso_y x1 = cx - 0.5 * size_scaling x2 = cx + 0.5 * size_scaling y1 = cy - 0.5 * size_scaling y2 = cy + 0.5 * size_scaling struct_segments = [ ((x1, y1), (x2, y1)), ((x2, y1), (x2, y2)), ((x2, y2), (x1, y2)), ((x1, y2), (x1, y1)), ] for seg in struct_segments: segments.append(dict( segment=seg, type=structure[i][j], )) # Get line segments (corresponding to outer boundary) of each movable # block within the agent's z-view. for block_name, block_type in self.movable_blocks: block_x, block_y, block_z = self.wrapped_env.get_body_com(block_name)[:3] if (block_z + height * size_scaling / 2 >= robot_z and robot_z >= block_z - height * size_scaling / 2): # Block in view. 
x1 = block_x - 0.5 * size_scaling x2 = block_x + 0.5 * size_scaling y1 = block_y - 0.5 * size_scaling y2 = block_y + 0.5 * size_scaling struct_segments = [ ((x1, y1), (x2, y1)), ((x2, y1), (x2, y2)), ((x2, y2), (x1, y2)), ((x1, y2), (x1, y1)), ] for seg in struct_segments: segments.append(dict( segment=seg, type=block_type, )) sensor_readings = np.zeros((self._n_bins, 3)) # 3 for wall, drop-off, block for ray_idx in range(self._n_bins): ray_ori = (ori - self._sensor_span * 0.5 + (2 * ray_idx + 1.0) / (2 * self._n_bins) * self._sensor_span) ray_segments = [] # Get all segments that intersect with ray. for seg in segments: p = maze_env_utils.ray_segment_intersect( ray=((robot_x, robot_y), ray_ori), segment=seg["segment"]) if p is not None: ray_segments.append(dict( segment=seg["segment"], type=seg["type"], ray_ori=ray_ori, distance=maze_env_utils.point_distance(p, (robot_x, robot_y)), )) if len(ray_segments) > 0: # Find out which segment is intersected first. first_seg = sorted(ray_segments, key=lambda x: x["distance"])[0] seg_type = first_seg["type"] idx = (0 if seg_type == 1 else # Wall. 1 if seg_type == -1 else # Drop-off. 2 if maze_env_utils.can_move(seg_type) else # Block. None) if first_seg["distance"] <= self._sensor_range: sensor_readings[ray_idx][idx] = (self._sensor_range - first_seg["distance"]) / self._sensor_range return sensor_readings def _get_obs(self): wrapped_obs = self.wrapped_env._get_obs() if self._top_down_view: view = [self.get_top_down_view().flat] else: view = [] if self._observe_blocks: additional_obs = [] for block_name, block_type in self.movable_blocks: additional_obs.append(self.wrapped_env.get_body_com(block_name)) wrapped_obs = np.concatenate([wrapped_obs[:3]] + additional_obs + [wrapped_obs[3:]]) range_sensor_obs = self.get_range_sensor_obs() return np.concatenate([wrapped_obs, range_sensor_obs.flat] + view + [[self.t * 0.001]]) def reset(self): self.t = 0 self.trajectory = [] self.wrapped_env.reset() if len(self._init_positions) > 1: xy = random.choice(self._init_positions) self.wrapped_env.set_xy(xy) return self._get_obs() @property def viewer(self): return self.wrapped_env.viewer def render(self, *args, **kwargs): return self.wrapped_env.render(*args, **kwargs) @property def observation_space(self): shape = self._get_obs().shape high = np.inf * np.ones(shape) low = -high return gym.spaces.Box(low, high) @property def action_space(self): return self.wrapped_env.action_space def _find_robot(self): structure = self.MAZE_STRUCTURE size_scaling = self.MAZE_SIZE_SCALING for i in range(len(structure)): for j in range(len(structure[0])): if structure[i][j] == 'r': return j * size_scaling, i * size_scaling assert False, 'No robot in maze specification.' 
def _find_all_robots(self): structure = self.MAZE_STRUCTURE size_scaling = self.MAZE_SIZE_SCALING coords = [] for i in range(len(structure)): for j in range(len(structure[0])): if structure[i][j] == 'r': coords.append((j * size_scaling, i * size_scaling)) return coords def _is_in_collision(self, pos): x, y = pos structure = self.MAZE_STRUCTURE size_scaling = self.MAZE_SIZE_SCALING for i in range(len(structure)): for j in range(len(structure[0])): if structure[i][j] == 1: minx = j * size_scaling - size_scaling * 0.5 - self._init_torso_x maxx = j * size_scaling + size_scaling * 0.5 - self._init_torso_x miny = i * size_scaling - size_scaling * 0.5 - self._init_torso_y maxy = i * size_scaling + size_scaling * 0.5 - self._init_torso_y if minx <= x <= maxx and miny <= y <= maxy: return True return False def step(self, action): self.t += 1 if self._manual_collision: old_pos = self.wrapped_env.get_xy() inner_next_obs, inner_reward, done, info = self.wrapped_env.step(action) new_pos = self.wrapped_env.get_xy() if self._is_in_collision(new_pos): self.wrapped_env.set_xy(old_pos) else: inner_next_obs, inner_reward, done, info = self.wrapped_env.step(action) next_obs = self._get_obs() done = False return next_obs, inner_reward, done, info
18,328
35.658
107
py
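Editor's note: a minimal, hedged usage sketch for the MazeEnv record above. It assumes the sibling environments/ant.py module (AntEnv) and a working MuJoCo install; the subclass name DemoAntMaze is purely illustrative and not part of the original file.

# Hypothetical sketch: MazeEnv stays abstract until MODEL_CLASS is bound to a
# concrete wrapped agent, after which it behaves like a regular gym.Env.
from environments.ant import AntEnv      # assumed to exist alongside maze_env.py
from environments.maze_env import MazeEnv


class DemoAntMaze(MazeEnv):
  MODEL_CLASS = AntEnv  # MazeEnv.__init__ raises if this is left as None


env = DemoAntMaze(maze_id='Maze', maze_size_scaling=8, n_bins=8)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())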
models
models-master/research/efficient-hrl/environments/create_maze_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from environments.ant_maze_env import AntMazeEnv from environments.point_maze_env import PointMazeEnv import tensorflow as tf import gin.tf from tf_agents.environments import gym_wrapper from tf_agents.environments import tf_py_environment @gin.configurable def create_maze_env(env_name=None, top_down_view=False): n_bins = 0 manual_collision = False if env_name.startswith('Ego'): n_bins = 8 env_name = env_name[3:] if env_name.startswith('Ant'): cls = AntMazeEnv env_name = env_name[3:] maze_size_scaling = 8 elif env_name.startswith('Point'): cls = PointMazeEnv manual_collision = True env_name = env_name[5:] maze_size_scaling = 4 else: assert False, 'unknown env %s' % env_name maze_id = None observe_blocks = False put_spin_near_agent = False if env_name == 'Maze': maze_id = 'Maze' elif env_name == 'Push': maze_id = 'Push' elif env_name == 'Fall': maze_id = 'Fall' elif env_name == 'Block': maze_id = 'Block' put_spin_near_agent = True observe_blocks = True elif env_name == 'BlockMaze': maze_id = 'BlockMaze' put_spin_near_agent = True observe_blocks = True else: raise ValueError('Unknown maze environment %s' % env_name) gym_mujoco_kwargs = { 'maze_id': maze_id, 'n_bins': n_bins, 'observe_blocks': observe_blocks, 'put_spin_near_agent': put_spin_near_agent, 'top_down_view': top_down_view, 'manual_collision': manual_collision, 'maze_size_scaling': maze_size_scaling } gym_env = cls(**gym_mujoco_kwargs) gym_env.reset() wrapped_env = gym_wrapper.GymWrapper(gym_env) return wrapped_env class TFPyEnvironment(tf_py_environment.TFPyEnvironment): def __init__(self, *args, **kwargs): super(TFPyEnvironment, self).__init__(*args, **kwargs) def start_collect(self): pass def current_obs(self): time_step = self.current_time_step() return time_step.observation[0] # For some reason, there is an extra dim. def step(self, actions): actions = tf.expand_dims(actions, 0) next_step = super(TFPyEnvironment, self).step(actions) return next_step.is_last()[0], next_step.reward[0], next_step.discount[0] def reset(self): return super(TFPyEnvironment, self).reset()
2,971
29.326531
80
py
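Editor's note: a short, hedged illustration of how create_maze_env above parses its env_name argument; the chosen names merely exercise the prefix/suffix scheme visible in the code.

# 'Ego' prefix -> 8-bin egocentric range sensor; 'Ant'/'Point' picks the agent;
# the remainder ('Maze', 'Push', 'Fall', 'Block', 'BlockMaze') picks the maze id.
from environments.create_maze_env import create_maze_env

ant_env = create_maze_env(env_name='AntMaze')          # tf_agents GymWrapper around AntMazeEnv
point_env = create_maze_env(env_name='EgoPointPush')   # egocentric point agent on the Push maze
first_time_step = ant_env.reset()                      # GymWrapper returns a tf_agents TimeStep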
models
models-master/research/efficient-hrl/environments/maze_env_utils.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adapted from rllab maze_env_utils.py.""" import numpy as np import math class Move(object): X = 11 Y = 12 Z = 13 XY = 14 XZ = 15 YZ = 16 XYZ = 17 SpinXY = 18 def can_move_x(movable): return movable in [Move.X, Move.XY, Move.XZ, Move.XYZ, Move.SpinXY] def can_move_y(movable): return movable in [Move.Y, Move.XY, Move.YZ, Move.XYZ, Move.SpinXY] def can_move_z(movable): return movable in [Move.Z, Move.XZ, Move.YZ, Move.XYZ] def can_spin(movable): return movable in [Move.SpinXY] def can_move(movable): return can_move_x(movable) or can_move_y(movable) or can_move_z(movable) def construct_maze(maze_id='Maze'): if maze_id == 'Maze': structure = [ [1, 1, 1, 1, 1], [1, 'r', 0, 0, 1], [1, 1, 1, 0, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1], ] elif maze_id == 'Push': structure = [ [1, 1, 1, 1, 1], [1, 0, 'r', 1, 1], [1, 0, Move.XY, 0, 1], [1, 1, 0, 1, 1], [1, 1, 1, 1, 1], ] elif maze_id == 'Fall': structure = [ [1, 1, 1, 1], [1, 'r', 0, 1], [1, 0, Move.YZ, 1], [1, -1, -1, 1], [1, 0, 0, 1], [1, 1, 1, 1], ] elif maze_id == 'Block': O = 'r' structure = [ [1, 1, 1, 1, 1], [1, O, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 1, 1, 1, 1], ] elif maze_id == 'BlockMaze': O = 'r' structure = [ [1, 1, 1, 1], [1, O, 0, 1], [1, 1, 0, 1], [1, 0, 0, 1], [1, 1, 1, 1], ] else: raise NotImplementedError('The provided MazeId %s is not recognized' % maze_id) return structure def line_intersect(pt1, pt2, ptA, ptB): """ Taken from https://www.cs.hmc.edu/ACM/lectures/intersections.html this returns the intersection of Line(pt1,pt2) and Line(ptA,ptB) """ DET_TOLERANCE = 0.00000001 # the first line is pt1 + r*(pt2-pt1) # in component form: x1, y1 = pt1 x2, y2 = pt2 dx1 = x2 - x1 dy1 = y2 - y1 # the second line is ptA + s*(ptB-ptA) x, y = ptA xB, yB = ptB dx = xB - x dy = yB - y DET = (-dx1 * dy + dy1 * dx) if math.fabs(DET) < DET_TOLERANCE: return (0, 0, 0, 0, 0) # now, the determinant should be OK DETinv = 1.0 / DET # find the scalar amount along the "self" segment r = DETinv * (-dy * (x - x1) + dx * (y - y1)) # find the scalar amount along the input line s = DETinv * (-dy1 * (x - x1) + dx1 * (y - y1)) # return the average of the two descriptions xi = (x1 + r * dx1 + x + s * dx) / 2.0 yi = (y1 + r * dy1 + y + s * dy) / 2.0 return (xi, yi, 1, r, s) def ray_segment_intersect(ray, segment): """ Check if the ray originated from (x, y) with direction theta intersects the line segment (x1, y1) -- (x2, y2), and return the intersection point if there is one """ (x, y), theta = ray # (x1, y1), (x2, y2) = segment pt1 = (x, y) len = 1 pt2 = (x + len * math.cos(theta), y + len * math.sin(theta)) xo, yo, valid, r, s = line_intersect(pt1, pt2, *segment) if valid and r >= 0 and 0 <= s <= 1: return (xo, yo) return None def point_distance(p1, p2): x1, y1 = p1 x2, y2 = p2 return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
4,001
23.254545
112
py
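Editor's note: a small sanity-check sketch for the geometry helpers in maze_env_utils above; the numeric values are chosen only for illustration.

import math

from environments import maze_env_utils

# Cast a ray from the origin along +x (theta = 0) at a wall segment at x = 2;
# ray_segment_intersect returns the hit point or None.
hit = maze_env_utils.ray_segment_intersect(
    ray=((0.0, 0.0), 0.0),
    segment=((2.0, -1.0), (2.0, 1.0)))
if hit is not None:
  print(maze_env_utils.point_distance((0.0, 0.0), hit))  # approximately 2.0

# construct_maze returns the grid consumed by MazeEnv; 'r' marks the robot start.
print(maze_env_utils.construct_maze('Maze'))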
models
models-master/research/efficient-hrl/environments/__init__.py
1
0
0
py
models
models-master/research/efficient-hrl/environments/ant_maze_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from environments.maze_env import MazeEnv from environments.ant import AntEnv class AntMazeEnv(MazeEnv): MODEL_CLASS = AntEnv
821
36.363636
80
py
models
models-master/research/object_detection/export_inference_graph.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Tool to export an object detection model for inference. Prepares an object detection tensorflow graph for inference using model configuration and a trained checkpoint. Outputs inference graph, associated checkpoint files, a frozen inference graph and a SavedModel (https://tensorflow.github.io/serving/serving_basic.html). The inference graph contains one of three input nodes depending on the user specified option. * `image_tensor`: Accepts a uint8 4-D tensor of shape [None, None, None, 3] * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] containing encoded PNG or JPEG images. Image resolutions are expected to be the same if more than 1 image is provided. * `tf_example`: Accepts a 1-D string tensor of shape [None] containing serialized TFExample protos. Image resolutions are expected to be the same if more than 1 image is provided. and the following output nodes returned by the model.postprocess(..): * `num_detections`: Outputs float32 tensors of the form [batch] that specifies the number of valid boxes per image in the batch. * `detection_boxes`: Outputs float32 tensors of the form [batch, num_boxes, 4] containing detected boxes. * `detection_scores`: Outputs float32 tensors of the form [batch, num_boxes] containing class scores for the detections. * `detection_classes`: Outputs float32 tensors of the form [batch, num_boxes] containing classes for the detections. * `raw_detection_boxes`: Outputs float32 tensors of the form [batch, raw_num_boxes, 4] containing detection boxes without post-processing. * `raw_detection_scores`: Outputs float32 tensors of the form [batch, raw_num_boxes, num_classes_with_background] containing class score logits for raw detection boxes. * `detection_masks`: (Optional) Outputs float32 tensors of the form [batch, num_boxes, mask_height, mask_width] containing predicted instance masks for each box if its present in the dictionary of postprocessed tensors returned by the model. * detection_multiclass_scores: (Optional) Outputs float32 tensor of shape [batch, num_boxes, num_classes_with_background] for containing class score distribution for detected boxes including background if any. * detection_features: (Optional) float32 tensor of shape [batch, num_boxes, roi_height, roi_width, depth] containing classifier features Notes: * This tool uses `use_moving_averages` from eval_config to decide which weights to freeze. 
Example Usage: -------------- python export_inference_graph.py \ --input_type image_tensor \ --pipeline_config_path path/to/ssd_inception_v2.config \ --trained_checkpoint_prefix path/to/model.ckpt \ --output_directory path/to/exported_model_directory The expected output would be in the directory path/to/exported_model_directory (which is created if it does not exist) with contents: - inference_graph.pbtxt - model.ckpt.data-00000-of-00001 - model.ckpt.info - model.ckpt.meta - frozen_inference_graph.pb + saved_model (a directory) Config overrides (see the `config_override` flag) are text protobufs (also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override certain fields in the provided pipeline_config_path. These are useful for making small changes to the inference graph that differ from the training or eval config. Example Usage (in which we change the second stage post-processing score threshold to be 0.5): python export_inference_graph.py \ --input_type image_tensor \ --pipeline_config_path path/to/ssd_inception_v2.config \ --trained_checkpoint_prefix path/to/model.ckpt \ --output_directory path/to/exported_model_directory \ --config_override " \ model{ \ faster_rcnn { \ second_stage_post_processing { \ batch_non_max_suppression { \ score_threshold: 0.5 \ } \ } \ } \ }" """ import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection import exporter from object_detection.protos import pipeline_pb2 flags = tf.app.flags flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be ' 'one of [`image_tensor`, `encoded_image_string_tensor`, ' '`tf_example`]') flags.DEFINE_string('input_shape', None, 'If input_type is `image_tensor`, this can explicitly set ' 'the shape of this input tensor to a fixed size. The ' 'dimensions are to be provided as a comma-separated list ' 'of integers. A value of -1 can be used for unknown ' 'dimensions. If not specified, for an `image_tensor, the ' 'default shape will be partially specified as ' '`[None, None, None, 3]`.') flags.DEFINE_string('pipeline_config_path', None, 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file.') flags.DEFINE_string('trained_checkpoint_prefix', None, 'Path to trained checkpoint, typically of the form ' 'path/to/model.ckpt') flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string('config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') flags.DEFINE_boolean('write_inference_graph', False, 'If true, writes inference graph to disk.') flags.DEFINE_string('additional_output_tensor_names', None, 'Additional Tensors to output, to be specified as a comma ' 'separated list of tensor names.') flags.DEFINE_boolean('use_side_inputs', False, 'If True, uses side inputs as well as image inputs.') flags.DEFINE_string('side_input_shapes', None, 'If use_side_inputs is True, this explicitly sets ' 'the shape of the side input tensors to a fixed size. The ' 'dimensions are to be provided as a comma-separated list ' 'of integers. A value of -1 can be used for unknown ' 'dimensions. A `/` denotes a break, starting the shape of ' 'the next side input tensor. This flag is required if ' 'using side inputs.') flags.DEFINE_string('side_input_types', None, 'If use_side_inputs is True, this explicitly sets ' 'the type of the side input tensors. The ' 'dimensions are to be provided as a comma-separated list ' 'of types, each of `string`, `integer`, or `float`. 
' 'This flag is required if using side inputs.') flags.DEFINE_string('side_input_names', None, 'If use_side_inputs is True, this explicitly sets ' 'the names of the side input tensors required by the model ' 'assuming the names will be a comma-separated list of ' 'strings. This flag is required if using side inputs.') tf.app.flags.mark_flag_as_required('pipeline_config_path') tf.app.flags.mark_flag_as_required('trained_checkpoint_prefix') tf.app.flags.mark_flag_as_required('output_directory') FLAGS = flags.FLAGS def main(_): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: text_format.Merge(f.read(), pipeline_config) text_format.Merge(FLAGS.config_override, pipeline_config) if FLAGS.input_shape: input_shape = [ int(dim) if dim != '-1' else None for dim in FLAGS.input_shape.split(',') ] else: input_shape = None if FLAGS.use_side_inputs: side_input_shapes, side_input_names, side_input_types = ( exporter.parse_side_inputs( FLAGS.side_input_shapes, FLAGS.side_input_names, FLAGS.side_input_types)) else: side_input_shapes = None side_input_names = None side_input_types = None if FLAGS.additional_output_tensor_names: additional_output_tensor_names = list( FLAGS.additional_output_tensor_names.split(',')) else: additional_output_tensor_names = None exporter.export_inference_graph( FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory, input_shape=input_shape, write_inference_graph=FLAGS.write_inference_graph, additional_output_tensor_names=additional_output_tensor_names, use_side_inputs=FLAGS.use_side_inputs, side_input_shapes=side_input_shapes, side_input_names=side_input_names, side_input_types=side_input_types) if __name__ == '__main__': tf.app.run()
9,607
45.640777
80
py
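Editor's note: a tiny illustration (not part of the original file) of how the --input_shape flag in export_inference_graph.py above is turned into the input_shape list handed to exporter.export_inference_graph.

# '-1' entries become None (unknown dimensions), mirroring the parsing in main().
flag_value = '1,-1,-1,3'
input_shape = [int(dim) if dim != '-1' else None for dim in flag_value.split(',')]
print(input_shape)  # [1, None, None, 3]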
models
models-master/research/object_detection/model_hparams.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Hyperparameters for the object detection model in TF.learn. This file consolidates and documents the hyperparameters used by the model. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-import-not-at-top try: from tensorflow.contrib import training as contrib_training except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top def create_hparams(hparams_overrides=None): """Returns hyperparameters, including any flag value overrides. Args: hparams_overrides: Optional hparams overrides, represented as a string containing comma-separated hparam_name=value pairs. Returns: The hyperparameters as a tf.HParams object. """ hparams = contrib_training.HParams( # Whether a fine tuning checkpoint (provided in the pipeline config) # should be loaded for training. load_pretrained=True) # Override any of the preceding hyperparameter values. if hparams_overrides: hparams = hparams.parse(hparams_overrides) return hparams
1,796
34.235294
80
py
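Editor's note: a hedged usage sketch for create_hparams above; it assumes a TF1-style environment where tensorflow.contrib.training is importable, as the module itself requires.

from object_detection.model_hparams import create_hparams

# Overrides are a comma-separated 'name=value' string handled by HParams.parse.
hparams = create_hparams(hparams_overrides='load_pretrained=false')
print(hparams.load_pretrained)  # False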
models
models-master/research/object_detection/model_lib_v2.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Constructs model, inputs, and training environment.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import os import pprint import time import numpy as np import tensorflow.compat.v1 as tf from object_detection import eval_util from object_detection import inputs from object_detection import model_lib from object_detection.builders import optimizer_builder from object_detection.core import standard_fields as fields from object_detection.protos import train_pb2 from object_detection.utils import config_util from object_detection.utils import label_map_util from object_detection.utils import ops from object_detection.utils import variables_helper from object_detection.utils import visualization_utils as vutils MODEL_BUILD_UTIL_MAP = model_lib.MODEL_BUILD_UTIL_MAP NUM_STEPS_PER_ITERATION = 100 LOG_EVERY = 100 RESTORE_MAP_ERROR_TEMPLATE = ( 'Since we are restoring a v2 style checkpoint' ' restore_map was expected to return a (str -> Model) mapping,' ' but we received a ({} -> {}) mapping instead.' ) def _compute_losses_and_predictions_dicts( model, features, labels, training_step=None, add_regularization_loss=True): """Computes the losses dict and predictions dict for a model on inputs. Args: model: a DetectionModel (based on Keras). features: Dictionary of feature tensors from the input dataset. Should be in the format output by `inputs.train_input` and `inputs.eval_input`. features[fields.InputDataFields.image] is a [batch_size, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [batch_size] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. features[fields.InputDataFields.original_image] (optional) is a [batch_size, H, W, C] float32 tensor with original images. labels: A dictionary of groundtruth tensors post-unstacking. The original labels are of the form returned by `inputs.train_input` and `inputs.eval_input`. The shapes may have been modified by unstacking with `model_lib.unstack_batch`. However, the dictionary includes the following fields. labels[fields.InputDataFields.num_groundtruth_boxes] is a int32 tensor indicating the number of valid groundtruth boxes per image. labels[fields.InputDataFields.groundtruth_boxes] is a float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a float32 one-hot tensor of classes. labels[fields.InputDataFields.groundtruth_weights] is a float32 tensor containing groundtruth weights for the boxes. 
-- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a float32 tensor containing only binary values, which represent instance masks for objects. labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a float32 tensor containing weights for the instance masks. labels[fields.InputDataFields.groundtruth_keypoints] is a float32 tensor containing keypoints for each box. labels[fields.InputDataFields.groundtruth_dp_num_points] is an int32 tensor with the number of sampled DensePose points per object. labels[fields.InputDataFields.groundtruth_dp_part_ids] is an int32 tensor with the DensePose part ids (0-indexed) per object. labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a float32 tensor with the DensePose surface coordinates. labels[fields.InputDataFields.groundtruth_group_of] is a tf.bool tensor containing group_of annotations. labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 k-hot tensor of classes. labels[fields.InputDataFields.groundtruth_track_ids] is a int32 tensor of track IDs. labels[fields.InputDataFields.groundtruth_keypoint_depths] is a float32 tensor containing keypoint depths information. labels[fields.InputDataFields.groundtruth_keypoint_depth_weights] is a float32 tensor containing the weights of the keypoint depth feature. training_step: int, the current training step. add_regularization_loss: Whether or not to include the model's regularization loss in the losses dictionary. Returns: A tuple containing the losses dictionary (with the total loss under the key 'Loss/total_loss'), and the predictions dictionary produced by `model.predict`. """ model_lib.provide_groundtruth(model, labels, training_step=training_step) preprocessed_images = features[fields.InputDataFields.image] prediction_dict = model.predict( preprocessed_images, features[fields.InputDataFields.true_image_shape], **model.get_side_inputs(features)) prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) losses_dict = model.loss( prediction_dict, features[fields.InputDataFields.true_image_shape]) losses = [loss_tensor for loss_tensor in losses_dict.values()] if add_regularization_loss: # TODO(kaftan): As we figure out mixed precision & bfloat 16, we may ## need to convert these regularization losses from bfloat16 to float32 ## as well. regularization_losses = model.regularization_losses() if regularization_losses: regularization_losses = ops.bfloat16_to_float32_nested( regularization_losses) regularization_loss = tf.add_n( regularization_losses, name='regularization_loss') losses.append(regularization_loss) losses_dict['Loss/regularization_loss'] = regularization_loss total_loss = tf.add_n(losses, name='total_loss') losses_dict['Loss/total_loss'] = total_loss return losses_dict, prediction_dict def _ensure_model_is_built(model, input_dataset, unpad_groundtruth_tensors): """Ensures that model variables are all built, by running on a dummy input. Args: model: A DetectionModel to be built. input_dataset: The tf.data Dataset the model is being trained on. Needed to get the shapes for the dummy loss computation. unpad_groundtruth_tensors: A parameter passed to unstack_batch. 
""" features, labels = iter(input_dataset).next() @tf.function def _dummy_computation_fn(features, labels): model._is_training = False # pylint: disable=protected-access tf.keras.backend.set_learning_phase(False) labels = model_lib.unstack_batch( labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) return _compute_losses_and_predictions_dicts(model, features, labels, training_step=0) strategy = tf.compat.v2.distribute.get_strategy() if hasattr(tf.distribute.Strategy, 'run'): strategy.run( _dummy_computation_fn, args=( features, labels, )) else: strategy.experimental_run_v2( _dummy_computation_fn, args=( features, labels, )) def normalize_dict(values_dict, num_replicas): num_replicas = tf.constant(num_replicas, dtype=tf.float32) return {key: tf.math.divide(loss, num_replicas) for key, loss in values_dict.items()} def reduce_dict(strategy, reduction_dict, reduction_op): # TODO(anjalisridhar): explore if it is safe to remove the # num_replicas # scaling of the loss and switch this to a ReduceOp.Mean return { name: strategy.reduce(reduction_op, loss, axis=None) for name, loss in reduction_dict.items() } # TODO(kaftan): Explore removing learning_rate from this method & returning ## The full losses dict instead of just total_loss, then doing all summaries ## saving in a utility method called by the outer training loop. # TODO(kaftan): Explore adding gradient summaries def eager_train_step(detection_model, features, labels, unpad_groundtruth_tensors, optimizer, training_step, add_regularization_loss=True, clip_gradients_value=None, num_replicas=1.0): """Process a single training batch. This method computes the loss for the model on a single training batch, while tracking the gradients with a gradient tape. It then updates the model variables with the optimizer, clipping the gradients if clip_gradients_value is present. This method can run eagerly or inside a tf.function. Args: detection_model: A DetectionModel (based on Keras) to train. features: Dictionary of feature tensors from the input dataset. Should be in the format output by `inputs.train_input. features[fields.InputDataFields.image] is a [batch_size, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [batch_size] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. features[fields.InputDataFields.original_image] (optional, not used during training) is a [batch_size, H, W, C] float32 tensor with original images. labels: A dictionary of groundtruth tensors. This method unstacks these labels using model_lib.unstack_batch. The stacked labels are of the form returned by `inputs.train_input` and `inputs.eval_input`. labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] int32 tensor indicating the number of valid groundtruth boxes per image. labels[fields.InputDataFields.groundtruth_boxes] is a [batch_size, num_boxes, 4] float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a [batch_size, num_boxes, num_classes] float32 one-hot tensor of classes. num_classes includes the background class. labels[fields.InputDataFields.groundtruth_weights] is a [batch_size, num_boxes] float32 tensor containing groundtruth weights for the boxes. 
-- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a [batch_size, num_boxes, H, W] float32 tensor containing only binary values, which represent instance masks for objects. labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a [batch_size, num_boxes] float32 tensor containing weights for the instance masks. labels[fields.InputDataFields.groundtruth_keypoints] is a [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing keypoints for each box. labels[fields.InputDataFields.groundtruth_dp_num_points] is a [batch_size, num_boxes] int32 tensor with the number of DensePose sampled points per instance. labels[fields.InputDataFields.groundtruth_dp_part_ids] is a [batch_size, num_boxes, max_sampled_points] int32 tensor with the part ids (0-indexed) for each instance. labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the surface coordinates for each point. Each surface coordinate is of the form (y, x, v, u) where (y, x) are normalized image locations and (v, u) are part-relative normalized surface coordinates. labels[fields.InputDataFields.groundtruth_labeled_classes] is a float32 k-hot tensor of classes. labels[fields.InputDataFields.groundtruth_track_ids] is a int32 tensor of track IDs. labels[fields.InputDataFields.groundtruth_keypoint_depths] is a float32 tensor containing keypoint depths information. labels[fields.InputDataFields.groundtruth_keypoint_depth_weights] is a float32 tensor containing the weights of the keypoint depth feature. unpad_groundtruth_tensors: A parameter passed to unstack_batch. optimizer: The training optimizer that will update the variables. training_step: int, the training step number. add_regularization_loss: Whether or not to include the model's regularization loss in the losses dictionary. clip_gradients_value: If this is present, clip the gradients global norm at this value using `tf.clip_by_global_norm`. num_replicas: The number of replicas in the current distribution strategy. This is used to scale the total loss so that training in a distribution strategy works correctly. Returns: The total loss observed at this training step """ # """Execute a single training step in the TF v2 style loop.""" is_training = True detection_model._is_training = is_training # pylint: disable=protected-access tf.keras.backend.set_learning_phase(is_training) labels = model_lib.unstack_batch( labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) with tf.GradientTape() as tape: losses_dict, _ = _compute_losses_and_predictions_dicts( detection_model, features, labels, training_step=training_step, add_regularization_loss=add_regularization_loss) losses_dict = normalize_dict(losses_dict, num_replicas) trainable_variables = detection_model.trainable_variables total_loss = losses_dict['Loss/total_loss'] gradients = tape.gradient(total_loss, trainable_variables) if clip_gradients_value: gradients, _ = tf.clip_by_global_norm(gradients, clip_gradients_value) optimizer.apply_gradients(zip(gradients, trainable_variables)) return losses_dict def validate_tf_v2_checkpoint_restore_map(checkpoint_restore_map): """Ensure that given dict is a valid TF v2 style restore map. Args: checkpoint_restore_map: A nested dict mapping strings to tf.keras.Model objects. Raises: ValueError: If they keys in checkpoint_restore_map are not strings or if the values are not keras Model objects. 
""" for key, value in checkpoint_restore_map.items(): if not (isinstance(key, str) and (isinstance(value, tf.Module) or isinstance(value, tf.train.Checkpoint))): if isinstance(key, str) and isinstance(value, dict): validate_tf_v2_checkpoint_restore_map(value) else: raise TypeError( RESTORE_MAP_ERROR_TEMPLATE.format(key.__class__.__name__, value.__class__.__name__)) def is_object_based_checkpoint(checkpoint_path): """Returns true if `checkpoint_path` points to an object-based checkpoint.""" var_names = [var[0] for var in tf.train.list_variables(checkpoint_path)] return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names def load_fine_tune_checkpoint(model, checkpoint_path, checkpoint_type, checkpoint_version, run_model_on_dummy_input, input_dataset, unpad_groundtruth_tensors): """Load a fine tuning classification or detection checkpoint. To make sure the model variables are all built, this method first executes the model by computing a dummy loss. (Models might not have built their variables before their first execution) It then loads an object-based classification or detection checkpoint. This method updates the model in-place and does not return a value. Args: model: A DetectionModel (based on Keras) to load a fine-tuning checkpoint for. checkpoint_path: Directory with checkpoints file or path to checkpoint. checkpoint_type: Whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. checkpoint_version: train_pb2.CheckpointVersion.V1 or V2 enum indicating whether to load checkpoints in V1 style or V2 style. In this binary we only support V2 style (object-based) checkpoints. run_model_on_dummy_input: Whether to run the model on a dummy input in order to ensure that all model variables have been built successfully before loading the fine_tune_checkpoint. input_dataset: The tf.data Dataset the model is being trained on. Needed to get the shapes for the dummy loss computation. unpad_groundtruth_tensors: A parameter passed to unstack_batch. Raises: IOError: if `checkpoint_path` does not point at a valid object-based checkpoint ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2 """ if not is_object_based_checkpoint(checkpoint_path): raise IOError('Checkpoint is expected to be an object-based checkpoint.') if checkpoint_version == train_pb2.CheckpointVersion.V1: raise ValueError('Checkpoint version should be V2') if run_model_on_dummy_input: _ensure_model_is_built(model, input_dataset, unpad_groundtruth_tensors) restore_from_objects_dict = model.restore_from_objects( fine_tune_checkpoint_type=checkpoint_type) validate_tf_v2_checkpoint_restore_map(restore_from_objects_dict) ckpt = tf.train.Checkpoint(**restore_from_objects_dict) ckpt.restore( checkpoint_path).expect_partial().assert_existing_objects_matched() def get_filepath(strategy, filepath): """Get appropriate filepath for worker. Args: strategy: A tf.distribute.Strategy object. filepath: A path to where the Checkpoint object is stored. Returns: A temporary filepath for non-chief workers to use or the original filepath for the chief. """ if strategy.extended.should_checkpoint: return filepath else: # TODO(vighneshb) Replace with the public API when TF exposes it. 
task_id = strategy.extended._task_id # pylint:disable=protected-access return os.path.join(filepath, 'temp_worker_{:03d}'.format(task_id)) def clean_temporary_directories(strategy, filepath): """Temporary directory clean up for MultiWorker Mirrored Strategy. This is needed for all non-chief workers. Args: strategy: A tf.distribute.Strategy object. filepath: The filepath for the temporary directory. """ if not strategy.extended.should_checkpoint: if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath): tf.io.gfile.rmtree(filepath) def train_loop( pipeline_config_path, model_dir, config_override=None, train_steps=None, use_tpu=False, save_final_config=False, checkpoint_every_n=1000, checkpoint_max_to_keep=7, record_summaries=True, performance_summary_exporter=None, num_steps_per_iteration=NUM_STEPS_PER_ITERATION, **kwargs): """Trains a model using eager + functions. This method: 1. Processes the pipeline configs 2. (Optionally) saves the as-run config 3. Builds the model & optimizer 4. Gets the training input data 5. Loads a fine-tuning detection or classification checkpoint if requested 6. Loops over the train data, executing distributed training steps inside tf.functions. 7. Checkpoints the model every `checkpoint_every_n` training steps. 8. Logs the training metrics as TensorBoard summaries. Args: pipeline_config_path: A path to a pipeline config file. model_dir: The directory to save checkpoints and summaries to. config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to override the config from `pipeline_config_path`. train_steps: Number of training steps. If None, the number of training steps is set from the `TrainConfig` proto. use_tpu: Boolean, whether training and evaluation should run on TPU. save_final_config: Whether to save final config (obtained after applying overrides) to `model_dir`. checkpoint_every_n: Checkpoint every n training steps. checkpoint_max_to_keep: int, the number of most recent checkpoints to keep in the model directory. record_summaries: Boolean, whether or not to record summaries defined by the model or the training pipeline. This does not impact the summaries of the loss values which are always recorded. Examples of summaries that are controlled by this flag include: - Image summaries of training images. - Intermediate tensors which maybe logged by meta architectures. performance_summary_exporter: function for exporting performance metrics. num_steps_per_iteration: int, The number of training steps to perform in each iteration. **kwargs: Additional keyword arguments for configuration override. 
""" ## Parse the configs get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ 'get_configs_from_pipeline_file'] merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ 'merge_external_params_with_configs'] create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ 'create_pipeline_proto_from_configs'] steps_per_sec_list = [] configs = get_configs_from_pipeline_file( pipeline_config_path, config_override=config_override) kwargs.update({ 'train_steps': train_steps, 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu }) configs = merge_external_params_with_configs( configs, None, kwargs_dict=kwargs) model_config = configs['model'] train_config = configs['train_config'] train_input_config = configs['train_input_config'] unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors add_regularization_loss = train_config.add_regularization_loss clip_gradients_value = None if train_config.gradient_clipping_by_norm > 0: clip_gradients_value = train_config.gradient_clipping_by_norm # update train_steps from config but only when non-zero value is provided if train_steps is None and train_config.num_steps != 0: train_steps = train_config.num_steps if kwargs['use_bfloat16']: tf.compat.v2.keras.mixed_precision.set_global_policy('mixed_bfloat16') if train_config.load_all_detection_checkpoint_vars: raise ValueError('train_pb2.load_all_detection_checkpoint_vars ' 'unsupported in TF2') config_util.update_fine_tune_checkpoint_type(train_config) fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version # Write the as-run pipeline config to disk. if save_final_config: tf.logging.info('Saving pipeline config file to directory %s', model_dir) pipeline_config_final = create_pipeline_proto_from_configs(configs) config_util.save_pipeline_config(pipeline_config_final, model_dir) # Build the model, optimizer, and training input strategy = tf.compat.v2.distribute.get_strategy() with strategy.scope(): detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']( model_config=model_config, is_training=True, add_summaries=record_summaries) def train_dataset_fn(input_context): """Callable to create train input.""" # Create the inputs. train_input = inputs.train_input( train_config=train_config, train_input_config=train_input_config, model_config=model_config, model=detection_model, input_context=input_context) train_input = train_input.repeat() return train_input train_input = strategy.experimental_distribute_datasets_from_function( train_dataset_fn) global_step = tf.Variable( 0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step', aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA) optimizer, (learning_rate,) = optimizer_builder.build( train_config.optimizer, global_step=global_step) # We run the detection_model on dummy inputs in order to ensure that the # model and all its variables have been properly constructed. Specifically, # this is currently necessary prior to (potentially) creating shadow copies # of the model variables for the EMA optimizer. if train_config.optimizer.use_moving_average: _ensure_model_is_built(detection_model, train_input, unpad_groundtruth_tensors) optimizer.shadow_copy(detection_model) if callable(learning_rate): learning_rate_fn = learning_rate else: learning_rate_fn = lambda: learning_rate ## Train the model # Get the appropriate filepath (temporary or not) based on whether the worker # is the chief. 
summary_writer_filepath = get_filepath(strategy, os.path.join(model_dir, 'train')) summary_writer = tf.compat.v2.summary.create_file_writer( summary_writer_filepath) with summary_writer.as_default(): with strategy.scope(): with tf.compat.v2.summary.record_if( lambda: global_step % num_steps_per_iteration == 0): # Load a fine-tuning checkpoint. if train_config.fine_tune_checkpoint: variables_helper.ensure_checkpoint_supported( train_config.fine_tune_checkpoint, fine_tune_checkpoint_type, model_dir) load_fine_tune_checkpoint( detection_model, train_config.fine_tune_checkpoint, fine_tune_checkpoint_type, fine_tune_checkpoint_version, train_config.run_fine_tune_checkpoint_dummy_computation, train_input, unpad_groundtruth_tensors) ckpt = tf.compat.v2.train.Checkpoint( step=global_step, model=detection_model, optimizer=optimizer) manager_dir = get_filepath(strategy, model_dir) if not strategy.extended.should_checkpoint: checkpoint_max_to_keep = 1 manager = tf.compat.v2.train.CheckpointManager( ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep) # We use the following instead of manager.latest_checkpoint because # manager_dir does not point to the model directory when we are running # in a worker. latest_checkpoint = tf.train.latest_checkpoint(model_dir) ckpt.restore(latest_checkpoint) def train_step_fn(features, labels): """Single train step.""" if record_summaries: tf.compat.v2.summary.image( name='train_input_images', step=global_step, data=features[fields.InputDataFields.image], max_outputs=3) losses_dict = eager_train_step( detection_model, features, labels, unpad_groundtruth_tensors, optimizer, training_step=global_step, add_regularization_loss=add_regularization_loss, clip_gradients_value=clip_gradients_value, num_replicas=strategy.num_replicas_in_sync) global_step.assign_add(1) return losses_dict def _sample_and_train(strategy, train_step_fn, data_iterator): features, labels = data_iterator.next() if hasattr(tf.distribute.Strategy, 'run'): per_replica_losses_dict = strategy.run( train_step_fn, args=(features, labels)) else: per_replica_losses_dict = ( strategy.experimental_run_v2( train_step_fn, args=(features, labels))) return reduce_dict( strategy, per_replica_losses_dict, tf.distribute.ReduceOp.SUM) @tf.function def _dist_train_step(data_iterator): """A distributed train step.""" if num_steps_per_iteration > 1: for _ in tf.range(num_steps_per_iteration - 1): # Following suggestion on yaqs/5402607292645376 with tf.name_scope(''): _sample_and_train(strategy, train_step_fn, data_iterator) return _sample_and_train(strategy, train_step_fn, data_iterator) train_input_iter = iter(train_input) if int(global_step.value()) == 0: manager.save() checkpointed_step = int(global_step.value()) logged_step = global_step.value() last_step_time = time.time() for _ in range(global_step.value(), train_steps, num_steps_per_iteration): losses_dict = _dist_train_step(train_input_iter) time_taken = time.time() - last_step_time last_step_time = time.time() steps_per_sec = num_steps_per_iteration * 1.0 / time_taken tf.compat.v2.summary.scalar( 'steps_per_sec', steps_per_sec, step=global_step) steps_per_sec_list.append(steps_per_sec) logged_dict = losses_dict.copy() logged_dict['learning_rate'] = learning_rate_fn() for key, val in logged_dict.items(): tf.compat.v2.summary.scalar(key, val, step=global_step) if global_step.value() - logged_step >= LOG_EVERY: logged_dict_np = {name: value.numpy() for name, value in logged_dict.items()} tf.logging.info( 'Step {} per-step time {:.3f}s'.format( global_step.value(), 
time_taken / num_steps_per_iteration)) tf.logging.info(pprint.pformat(logged_dict_np, width=40)) logged_step = global_step.value() if ((int(global_step.value()) - checkpointed_step) >= checkpoint_every_n): manager.save() checkpointed_step = int(global_step.value()) # Remove the checkpoint directories of the non-chief workers that # MultiWorkerMirroredStrategy forces us to save during sync distributed # training. clean_temporary_directories(strategy, manager_dir) clean_temporary_directories(strategy, summary_writer_filepath) # TODO(pkanwar): add accuracy metrics. if performance_summary_exporter is not None: metrics = { 'steps_per_sec': np.mean(steps_per_sec_list), 'steps_per_sec_p50': np.median(steps_per_sec_list), 'steps_per_sec_max': max(steps_per_sec_list), 'last_batch_loss': float(losses_dict['Loss/total_loss']) } mixed_precision = 'bf16' if kwargs['use_bfloat16'] else 'fp32' performance_summary_exporter(metrics, mixed_precision) def prepare_eval_dict(detections, groundtruth, features): """Prepares eval dictionary containing detections and groundtruth. Takes in `detections` from the model, `groundtruth` and `features` returned from the eval tf.data.dataset and creates a dictionary of tensors suitable for detection eval modules. Args: detections: A dictionary of tensors returned by `model.postprocess`. groundtruth: `inputs.eval_input` returns an eval dataset of (features, labels) tuple. `groundtruth` must be set to `labels`. Please note that: * fields.InputDataFields.groundtruth_classes must be 0-indexed and in its 1-hot representation. * fields.InputDataFields.groundtruth_verified_neg_classes must be 0-indexed and in its multi-hot repesentation. * fields.InputDataFields.groundtruth_not_exhaustive_classes must be 0-indexed and in its multi-hot repesentation. * fields.InputDataFields.groundtruth_labeled_classes must be 0-indexed and in its multi-hot repesentation. features: `inputs.eval_input` returns an eval dataset of (features, labels) tuple. This argument must be set to a dictionary containing the following keys and their corresponding values from `features` -- * fields.InputDataFields.image * fields.InputDataFields.original_image * fields.InputDataFields.original_image_spatial_shape * fields.InputDataFields.true_image_shape * inputs.HASH_KEY Returns: eval_dict: A dictionary of tensors to pass to eval module. class_agnostic: Whether to evaluate detection in class agnostic mode. """ groundtruth_boxes = groundtruth[fields.InputDataFields.groundtruth_boxes] groundtruth_boxes_shape = tf.shape(groundtruth_boxes) # For class-agnostic models, groundtruth one-hot encodings collapse to all # ones. 
class_agnostic = ( fields.DetectionResultFields.detection_classes not in detections) if class_agnostic: groundtruth_classes_one_hot = tf.ones( [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1]) else: groundtruth_classes_one_hot = groundtruth[ fields.InputDataFields.groundtruth_classes] label_id_offset = 1 # Applying label id offset (b/63711816) groundtruth_classes = ( tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset) groundtruth[fields.InputDataFields.groundtruth_classes] = groundtruth_classes label_id_offset_paddings = tf.constant([[0, 0], [1, 0]]) if fields.InputDataFields.groundtruth_verified_neg_classes in groundtruth: groundtruth[ fields.InputDataFields.groundtruth_verified_neg_classes] = tf.pad( groundtruth[ fields.InputDataFields.groundtruth_verified_neg_classes], label_id_offset_paddings) if fields.InputDataFields.groundtruth_not_exhaustive_classes in groundtruth: groundtruth[ fields.InputDataFields.groundtruth_not_exhaustive_classes] = tf.pad( groundtruth[ fields.InputDataFields.groundtruth_not_exhaustive_classes], label_id_offset_paddings) if fields.InputDataFields.groundtruth_labeled_classes in groundtruth: groundtruth[fields.InputDataFields.groundtruth_labeled_classes] = tf.pad( groundtruth[fields.InputDataFields.groundtruth_labeled_classes], label_id_offset_paddings) use_original_images = fields.InputDataFields.original_image in features if use_original_images: eval_images = features[fields.InputDataFields.original_image] true_image_shapes = features[fields.InputDataFields.true_image_shape][:, :3] original_image_spatial_shapes = features[ fields.InputDataFields.original_image_spatial_shape] else: eval_images = features[fields.InputDataFields.image] true_image_shapes = None original_image_spatial_shapes = None eval_dict = eval_util.result_dict_for_batched_example( eval_images, features[inputs.HASH_KEY], detections, groundtruth, class_agnostic=class_agnostic, scale_to_absolute=True, original_image_spatial_shapes=original_image_spatial_shapes, true_image_shapes=true_image_shapes) return eval_dict, class_agnostic def concat_replica_results(tensor_dict): new_tensor_dict = {} for key, values in tensor_dict.items(): new_tensor_dict[key] = tf.concat(values, axis=0) return new_tensor_dict def eager_eval_loop( detection_model, configs, eval_dataset, use_tpu=False, postprocess_on_cpu=False, global_step=None, ): """Evaluate the model eagerly on the evaluation dataset. This method will compute the evaluation metrics specified in the configs on the entire evaluation dataset, then return the metrics. It will also log the metrics to TensorBoard. Args: detection_model: A DetectionModel (based on Keras) to evaluate. configs: Object detection configs that specify the evaluators that should be used, as well as whether regularization loss should be included and if bfloat16 should be used on TPUs. eval_dataset: Dataset containing evaluation data. use_tpu: Whether a TPU is being used to execute the model for evaluation. postprocess_on_cpu: Whether model postprocessing should happen on the CPU when using a TPU to execute the model. global_step: A variable containing the training step this model was trained to. Used for logging purposes. Returns: A dict of evaluation metrics representing the results of this evaluation. 
""" del postprocess_on_cpu train_config = configs['train_config'] eval_input_config = configs['eval_input_config'] eval_config = configs['eval_config'] add_regularization_loss = train_config.add_regularization_loss is_training = False detection_model._is_training = is_training # pylint: disable=protected-access tf.keras.backend.set_learning_phase(is_training) evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) batch_size = eval_config.batch_size class_agnostic_category_index = ( label_map_util.create_class_agnostic_category_index()) class_agnostic_evaluators = eval_util.get_evaluators( eval_config, list(class_agnostic_category_index.values()), evaluator_options) class_aware_evaluators = None if eval_input_config.label_map_path: class_aware_category_index = ( label_map_util.create_category_index_from_labelmap( eval_input_config.label_map_path)) class_aware_evaluators = eval_util.get_evaluators( eval_config, list(class_aware_category_index.values()), evaluator_options) evaluators = None loss_metrics = {} @tf.function def compute_eval_dict(features, labels): """Compute the evaluation result on an image.""" # For evaling on train data, it is necessary to check whether groundtruth # must be unpadded. boxes_shape = ( labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list()) unpad_groundtruth_tensors = (boxes_shape[1] is not None and not use_tpu and batch_size == 1) groundtruth_dict = labels labels = model_lib.unstack_batch( labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) losses_dict, prediction_dict = _compute_losses_and_predictions_dicts( detection_model, features, labels, training_step=None, add_regularization_loss=add_regularization_loss) prediction_dict = detection_model.postprocess( prediction_dict, features[fields.InputDataFields.true_image_shape]) eval_features = { fields.InputDataFields.image: features[fields.InputDataFields.image], fields.InputDataFields.original_image: features[fields.InputDataFields.original_image], fields.InputDataFields.original_image_spatial_shape: features[fields.InputDataFields.original_image_spatial_shape], fields.InputDataFields.true_image_shape: features[fields.InputDataFields.true_image_shape], inputs.HASH_KEY: features[inputs.HASH_KEY], } return losses_dict, prediction_dict, groundtruth_dict, eval_features agnostic_categories = label_map_util.create_class_agnostic_category_index() per_class_categories = label_map_util.create_category_index_from_labelmap( eval_input_config.label_map_path) keypoint_edges = [ (kp.start, kp.end) for kp in eval_config.keypoint_edge] strategy = tf.compat.v2.distribute.get_strategy() for i, (features, labels) in enumerate(eval_dataset): try: (losses_dict, prediction_dict, groundtruth_dict, eval_features) = strategy.run( compute_eval_dict, args=(features, labels)) except Exception as exc: # pylint:disable=broad-except tf.logging.info('Encountered %s exception.', exc) tf.logging.info('A replica probably exhausted all examples. 
Skipping ' 'pending examples on other replicas.') break (local_prediction_dict, local_groundtruth_dict, local_eval_features) = tf.nest.map_structure( strategy.experimental_local_results, [prediction_dict, groundtruth_dict, eval_features]) local_prediction_dict = concat_replica_results(local_prediction_dict) local_groundtruth_dict = concat_replica_results(local_groundtruth_dict) local_eval_features = concat_replica_results(local_eval_features) eval_dict, class_agnostic = prepare_eval_dict(local_prediction_dict, local_groundtruth_dict, local_eval_features) for loss_key, loss_tensor in iter(losses_dict.items()): losses_dict[loss_key] = strategy.reduce(tf.distribute.ReduceOp.MEAN, loss_tensor, None) if class_agnostic: category_index = agnostic_categories else: category_index = per_class_categories if i % 100 == 0: tf.logging.info('Finished eval step %d', i) use_original_images = fields.InputDataFields.original_image in features if (use_original_images and i < eval_config.num_visualizations): sbys_image_list = vutils.draw_side_by_side_evaluation_image( eval_dict, category_index=category_index, max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, min_score_thresh=eval_config.min_score_threshold, use_normalized_coordinates=False, keypoint_edges=keypoint_edges or None) for j, sbys_image in enumerate(sbys_image_list): tf.compat.v2.summary.image( name='eval_side_by_side_{}_{}'.format(i, j), step=global_step, data=sbys_image, max_outputs=eval_config.num_visualizations) if eval_util.has_densepose(eval_dict): dp_image_list = vutils.draw_densepose_visualizations( eval_dict) for j, dp_image in enumerate(dp_image_list): tf.compat.v2.summary.image( name='densepose_detections_{}_{}'.format(i, j), step=global_step, data=dp_image, max_outputs=eval_config.num_visualizations) if evaluators is None: if class_agnostic: evaluators = class_agnostic_evaluators else: evaluators = class_aware_evaluators for evaluator in evaluators: evaluator.add_eval_dict(eval_dict) for loss_key, loss_tensor in iter(losses_dict.items()): if loss_key not in loss_metrics: loss_metrics[loss_key] = [] loss_metrics[loss_key].append(loss_tensor) eval_metrics = {} for evaluator in evaluators: eval_metrics.update(evaluator.evaluate()) for loss_key in loss_metrics: eval_metrics[loss_key] = tf.reduce_mean(loss_metrics[loss_key]) eval_metrics = {str(k): v for k, v in eval_metrics.items()} tf.logging.info('Eval metrics at step %d', global_step.numpy()) for k in eval_metrics: tf.compat.v2.summary.scalar(k, eval_metrics[k], step=global_step) tf.logging.info('\t+ %s: %f', k, eval_metrics[k]) return eval_metrics def eval_continuously( pipeline_config_path, config_override=None, train_steps=None, sample_1_of_n_eval_examples=1, sample_1_of_n_eval_on_train_examples=1, use_tpu=False, override_eval_num_epochs=True, postprocess_on_cpu=False, model_dir=None, checkpoint_dir=None, wait_interval=180, timeout=3600, eval_index=0, save_final_config=False, **kwargs): """Run continuous evaluation of a detection model eagerly. This method builds the model, and continously restores it from the most recent training checkpoint in the checkpoint directory & evaluates it on the evaluation data. Args: pipeline_config_path: A path to a pipeline config file. config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to override the config from `pipeline_config_path`. train_steps: Number of training steps. If None, the number of training steps is set from the `TrainConfig` proto. 
sample_1_of_n_eval_examples: Integer representing how often an eval example should be sampled. If 1, will sample all examples. sample_1_of_n_eval_on_train_examples: Similar to `sample_1_of_n_eval_examples`, except controls the sampling of training data for evaluation. use_tpu: Boolean, whether training and evaluation should run on TPU. override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for eval_input. postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, postprocess is scheduled on the host cpu. model_dir: Directory to output resulting evaluation summaries to. checkpoint_dir: Directory that contains the training checkpoints. wait_interval: The minimum number of seconds to wait before checking for a new checkpoint. timeout: The maximum number of seconds to wait for a checkpoint. Execution will terminate if no new checkpoints are found after that many seconds. eval_index: int, if given, only evaluate the dataset at the given index. By default, evaluates dataset at 0'th index. save_final_config: Whether to save the pipeline config file to the model directory. **kwargs: Additional keyword arguments for configuration override. """ get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ 'get_configs_from_pipeline_file'] create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ 'create_pipeline_proto_from_configs'] merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ 'merge_external_params_with_configs'] configs = get_configs_from_pipeline_file( pipeline_config_path, config_override=config_override) kwargs.update({ 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples, 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu }) if train_steps is not None: kwargs['train_steps'] = train_steps if override_eval_num_epochs: kwargs.update({'eval_num_epochs': 1}) tf.logging.warning( 'Forced number of epochs for all eval validations to be 1.') configs = merge_external_params_with_configs( configs, None, kwargs_dict=kwargs) if model_dir and save_final_config: tf.logging.info('Saving pipeline config file to directory %s', model_dir) pipeline_config_final = create_pipeline_proto_from_configs(configs) config_util.save_pipeline_config(pipeline_config_final, model_dir) model_config = configs['model'] train_input_config = configs['train_input_config'] eval_config = configs['eval_config'] eval_input_configs = configs['eval_input_configs'] eval_on_train_input_config = copy.deepcopy(train_input_config) eval_on_train_input_config.sample_1_of_n_examples = ( sample_1_of_n_eval_on_train_examples) if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: tf.logging.warning( ('Expected number of evaluation epochs is 1, but ' 'instead encountered `eval_on_train_input_config' '.num_epochs` = %d. 
Overwriting `num_epochs` to 1.'), eval_on_train_input_config.num_epochs) eval_on_train_input_config.num_epochs = 1 if kwargs['use_bfloat16']: tf.compat.v2.keras.mixed_precision.set_global_policy('mixed_bfloat16') eval_input_config = eval_input_configs[eval_index] strategy = tf.compat.v2.distribute.get_strategy() with strategy.scope(): detection_model = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']( model_config=model_config, is_training=True) eval_input = strategy.experimental_distribute_dataset( inputs.eval_input( eval_config=eval_config, eval_input_config=eval_input_config, model_config=model_config, model=detection_model)) global_step = tf.compat.v2.Variable( 0, trainable=False, dtype=tf.compat.v2.dtypes.int64) optimizer, _ = optimizer_builder.build( configs['train_config'].optimizer, global_step=global_step) for latest_checkpoint in tf.train.checkpoints_iterator( checkpoint_dir, timeout=timeout, min_interval_secs=wait_interval): ckpt = tf.compat.v2.train.Checkpoint( step=global_step, model=detection_model, optimizer=optimizer) # We run the detection_model on dummy inputs in order to ensure that the # model and all its variables have been properly constructed. Specifically, # this is currently necessary prior to (potentially) creating shadow copies # of the model variables for the EMA optimizer. if eval_config.use_moving_averages: unpad_groundtruth_tensors = (eval_config.batch_size == 1 and not use_tpu) _ensure_model_is_built(detection_model, eval_input, unpad_groundtruth_tensors) optimizer.shadow_copy(detection_model) ckpt.restore(latest_checkpoint).expect_partial() if eval_config.use_moving_averages: optimizer.swap_weights() summary_writer = tf.compat.v2.summary.create_file_writer( os.path.join(model_dir, 'eval', eval_input_config.name)) with summary_writer.as_default(): eager_eval_loop( detection_model, configs, eval_input, use_tpu=use_tpu, postprocess_on_cpu=postprocess_on_cpu, global_step=global_step, ) if global_step.numpy() == configs['train_config'].num_steps: tf.logging.info('Exiting evaluation at step %d', global_step.numpy()) return
49,989
41.726496
80
py
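To make the eval entry points in the file above concrete, here is a minimal sketch of driving `model_lib_v2.eval_continuously` from a small script, using only the keyword arguments defined in its signature above; the file paths are hypothetical placeholders and the interval/timeout values are purely illustrative.

import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2

tf.enable_v2_behavior()

if __name__ == '__main__':
  # Poll a checkpoint directory (hypothetical path) for new checkpoints and
  # write eval summaries under the same model directory.
  model_lib_v2.eval_continuously(
      pipeline_config_path='/tmp/pipeline.config',  # hypothetical config path
      model_dir='/tmp/model_dir',                   # eval summaries written here
      checkpoint_dir='/tmp/model_dir',              # directory polled for checkpoints
      wait_interval=180,   # minimum seconds between checkpoint checks
      timeout=3600)        # stop if no new checkpoint appears within an hour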
models
models-master/research/object_detection/model_lib_tf2_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object detection model library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import tempfile import unittest import numpy as np import six import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 from object_detection import exporter_lib_v2 from object_detection import inputs from object_detection import model_lib_v2 from object_detection.core import model from object_detection.protos import train_pb2 from object_detection.utils import config_util from object_detection.utils import tf_version if six.PY2: import mock # pylint: disable=g-importing-member,g-import-not-at-top else: from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top # Model for test. Current options are: # 'ssd_mobilenet_v2_pets_keras' MODEL_NAME_FOR_TEST = 'ssd_mobilenet_v2_pets_keras' def _get_data_path(): """Returns an absolute path to TFRecord file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', 'pets_examples.record') def get_pipeline_config_path(model_name): """Returns path to the local pipeline config file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', 'configs', model_name + '.config') def _get_labelmap_path(): """Returns an absolute path to label map file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'pet_label_map.pbtxt') def _get_config_kwarg_overrides(): """Returns overrides to the configs that insert the correct local paths.""" data_path = _get_data_path() label_map_path = _get_labelmap_path() return { 'train_input_path': data_path, 'eval_input_path': data_path, 'label_map_path': label_map_path, 'train_input_reader': {'batch_size': 1} } @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ModelLibTest(tf.test.TestCase): @classmethod def setUpClass(cls): # pylint:disable=g-missing-super-call tf.keras.backend.clear_session() def test_train_loop_then_eval_loop(self): """Tests that Estimator and input function are constructed correctly.""" model_dir = tf.test.get_temp_dir() pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config') config_util.clear_fine_tune_checkpoint(pipeline_config_path, new_pipeline_config_path) config_kwarg_overrides = _get_config_kwarg_overrides() train_steps = 2 strategy = tf2.distribute.MirroredStrategy(['/cpu:0', '/cpu:1']) with strategy.scope(): model_lib_v2.train_loop( new_pipeline_config_path, model_dir=model_dir, train_steps=train_steps, checkpoint_every_n=1, num_steps_per_iteration=1, **config_kwarg_overrides) model_lib_v2.eval_continuously( new_pipeline_config_path, model_dir=model_dir, checkpoint_dir=model_dir, train_steps=train_steps, wait_interval=1, timeout=10, 
**config_kwarg_overrides) class SimpleModel(model.DetectionModel): """A model with a single weight vector.""" def __init__(self, num_classes=1): super(SimpleModel, self).__init__(num_classes) self.weight = tf.keras.backend.variable(np.ones(10), name='weight') def postprocess(self, prediction_dict, true_image_shapes): return {} def updates(self): return [] def restore_map(self, *args, **kwargs): pass def restore_from_objects(self, fine_tune_checkpoint_type): return {'model': self} def preprocess(self, _): return tf.zeros((1, 128, 128, 3)), tf.constant([[128, 128, 3]]) def provide_groundtruth(self, *args, **kwargs): pass def predict(self, pred_inputs, true_image_shapes): return {'prediction': tf.abs(tf.reduce_sum(self.weight) * tf.reduce_sum(pred_inputs))} def loss(self, prediction_dict, _): return {'loss': tf.reduce_sum(prediction_dict['prediction'])} def regularization_losses(self): return [] def fake_model_builder(*_, **__): return SimpleModel() FAKE_BUILDER_MAP = {'detection_model_fn_base': fake_model_builder} @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ModelCheckpointTest(tf.test.TestCase): """Test for model checkpoint related functionality.""" def test_checkpoint_max_to_keep(self): """Test that only the most recent checkpoints are kept.""" strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0') with mock.patch.dict( model_lib_v2.MODEL_BUILD_UTIL_MAP, FAKE_BUILDER_MAP): model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config') config_util.clear_fine_tune_checkpoint(pipeline_config_path, new_pipeline_config_path) config_kwarg_overrides = _get_config_kwarg_overrides() with strategy.scope(): model_lib_v2.train_loop( new_pipeline_config_path, model_dir=model_dir, train_steps=5, checkpoint_every_n=2, checkpoint_max_to_keep=3, num_steps_per_iteration=1, **config_kwarg_overrides ) ckpt_files = tf.io.gfile.glob(os.path.join(model_dir, 'ckpt-*.index')) self.assertEqual(len(ckpt_files), 3, '{} not of length 3.'.format(ckpt_files)) class IncompatibleModel(SimpleModel): def restore_from_objects(self, *args, **kwargs): return {'weight': self.weight} @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CheckpointV2Test(tf.test.TestCase): def setUp(self): super(CheckpointV2Test, self).setUp() self._model = SimpleModel() tf.keras.backend.set_value(self._model.weight, np.ones(10) * 42) ckpt = tf.train.Checkpoint(model=self._model) self._test_dir = tf.test.get_temp_dir() self._ckpt_path = ckpt.save(os.path.join(self._test_dir, 'ckpt')) tf.keras.backend.set_value(self._model.weight, np.ones(10)) pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=_get_config_kwarg_overrides()) self._train_input_fn = inputs.create_train_input_fn( configs['train_config'], configs['train_input_config'], configs['model']) def test_restore_v2(self): """Test that restoring a v2 style checkpoint works.""" model_lib_v2.load_fine_tune_checkpoint( self._model, self._ckpt_path, checkpoint_type='', checkpoint_version=train_pb2.CheckpointVersion.V2, run_model_on_dummy_input=True, input_dataset=self._train_input_fn(), unpad_groundtruth_tensors=True) np.testing.assert_allclose(self._model.weight.numpy(), 42) def test_restore_map_incompatible_error(self): """Test that restoring an 
incompatible restore map causes an error.""" with self.assertRaisesRegex(TypeError, r'.*received a \(str -> ResourceVariable\).*'): model_lib_v2.load_fine_tune_checkpoint( IncompatibleModel(), self._ckpt_path, checkpoint_type='', checkpoint_version=train_pb2.CheckpointVersion.V2, run_model_on_dummy_input=True, input_dataset=self._train_input_fn(), unpad_groundtruth_tensors=True) @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class MetricsExportTest(tf.test.TestCase): @classmethod def setUpClass(cls): # pylint:disable=g-missing-super-call tf.keras.backend.clear_session() def test_export_metrics_json_serializable(self): """Tests that Estimator and input function are constructed correctly.""" strategy = tf2.distribute.OneDeviceStrategy(device='/cpu:0') def export(data, _): json.dumps(data) with mock.patch.dict( exporter_lib_v2.INPUT_BUILDER_UTIL_MAP, FAKE_BUILDER_MAP): with strategy.scope(): model_dir = tf.test.get_temp_dir() new_pipeline_config_path = os.path.join(model_dir, 'new_pipeline.config') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) config_util.clear_fine_tune_checkpoint(pipeline_config_path, new_pipeline_config_path) train_steps = 2 with strategy.scope(): model_lib_v2.train_loop( new_pipeline_config_path, model_dir=model_dir, train_steps=train_steps, checkpoint_every_n=100, performance_summary_exporter=export, num_steps_per_iteration=1, **_get_config_kwarg_overrides()) def setUpModule(): # Setup virtual CPUs. cpus = tf.config.list_physical_devices('CPU') tf.config.set_logical_device_configuration( cpus[-1], [tf.config.LogicalDeviceConfiguration()] * 2 ) if __name__ == '__main__': tf.test.main()
9,990
34.05614
85
py
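The test above exercises `train_loop` by patching `MODEL_BUILD_UTIL_MAP` with a fake builder; a minimal sketch of that pattern, assuming the `SimpleModel` stub defined in the test module above and hypothetical config/model paths, might look like this.

from unittest import mock
import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2

def run_tiny_train_loop(pipeline_config_path, model_dir):
  # Swap the real detection-model builder for the lightweight stub so the
  # training loop can be exercised quickly on CPU (illustrative values only).
  # SimpleModel is the stub DetectionModel defined in the test module above.
  fake_builder_map = {'detection_model_fn_base': lambda *args, **kwargs: SimpleModel()}
  strategy = tf.distribute.OneDeviceStrategy(device='/cpu:0')
  with mock.patch.dict(model_lib_v2.MODEL_BUILD_UTIL_MAP, fake_builder_map):
    with strategy.scope():
      model_lib_v2.train_loop(
          pipeline_config_path,
          model_dir=model_dir,
          train_steps=2,
          checkpoint_every_n=1,
          num_steps_per_iteration=1)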
models
models-master/research/object_detection/eval_util_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for eval_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection import eval_util from object_detection.core import standard_fields as fields from object_detection.metrics import coco_evaluation from object_detection.protos import eval_pb2 from object_detection.utils import test_case from object_detection.utils import tf_version class EvalUtilTest(test_case.TestCase, parameterized.TestCase): def _get_categories_list(self): return [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'dog'}, {'id': 3, 'name': 'cat'}] def _get_categories_list_with_keypoints(self): return [{ 'id': 1, 'name': 'person', 'keypoints': { 'left_eye': 0, 'right_eye': 3 } }, { 'id': 2, 'name': 'dog', 'keypoints': { 'tail_start': 1, 'mouth': 2 } }, { 'id': 3, 'name': 'cat' }] def _make_evaluation_dict(self, resized_groundtruth_masks=False, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): input_data_fields = fields.InputDataFields detection_fields = fields.DetectionResultFields image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8) if batch_size == 1: key = tf.constant('image1') else: key = tf.constant([str(i) for i in range(batch_size)]) detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]), multiples=[batch_size, 1, 1]) detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1]) detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1]) detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32), multiples=[batch_size, 1, 1, 1]) num_detections = tf.ones([batch_size]) groundtruth_boxes = tf.constant([[0., 0., 1., 1.]]) groundtruth_classes = tf.constant([1]) groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8) original_image_spatial_shapes = tf.constant([[20, 20]], dtype=tf.int32) groundtruth_keypoints = tf.constant([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]) if resized_groundtruth_masks: groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8) if batch_size > 1: groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0), multiples=[batch_size, 1, 1]) groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0), multiples=[batch_size, 1]) groundtruth_instance_masks = tf.tile( tf.expand_dims(groundtruth_instance_masks, 0), multiples=[batch_size, 1, 1, 1]) groundtruth_keypoints = tf.tile( tf.expand_dims(groundtruth_keypoints, 0), multiples=[batch_size, 1, 1]) original_image_spatial_shapes = tf.tile(original_image_spatial_shapes, multiples=[batch_size, 1]) detections = { detection_fields.detection_boxes: detection_boxes, detection_fields.detection_scores: 
detection_scores, detection_fields.detection_classes: detection_classes, detection_fields.detection_masks: detection_masks, detection_fields.num_detections: num_detections } groundtruth = { input_data_fields.groundtruth_boxes: groundtruth_boxes, input_data_fields.groundtruth_classes: groundtruth_classes, input_data_fields.groundtruth_keypoints: groundtruth_keypoints, input_data_fields.groundtruth_instance_masks: groundtruth_instance_masks, input_data_fields.original_image_spatial_shape: original_image_spatial_shapes } if batch_size > 1: return eval_util.result_dict_for_batched_example( image, key, detections, groundtruth, scale_to_absolute=scale_to_absolute, max_gt_boxes=max_gt_boxes) else: return eval_util.result_dict_for_single_example( image, key, detections, groundtruth, scale_to_absolute=scale_to_absolute) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['coco_detection_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op = metric_ops['DetectionBoxes_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in six.iteritems(metric_ops): metrics[key] = value_op sess.run(update_op) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertNotIn('DetectionMasks_Precision/mAP', metrics) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') def test_get_eval_metric_ops_for_coco_detections_and_masks( self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'coco_mask_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in six.iteritems(metric_ops): metrics[key] = value_op sess.run(update_op_boxes) sess.run(update_op_masks) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, {'batch_size': 1, 
'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') def test_get_eval_metric_ops_for_coco_detections_and_resized_masks( self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'coco_mask_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute, resized_groundtruth_masks=True) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in six.iteritems(metric_ops): metrics[key] = value_op sess.run(update_op_boxes) sess.run(update_op_masks) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) @unittest.skipIf(tf_version.is_tf2(), 'Only compatible with TF1.X') def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['unsupported_metric']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict() with self.assertRaises(ValueError): eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) def test_get_eval_metric_ops_for_evaluators(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend([ 'coco_detection_metrics', 'coco_mask_metrics', 'precision_at_recall_detection_metrics' ]) eval_config.include_metrics_per_category = True eval_config.recall_lower_bound = 0.2 eval_config.recall_upper_bound = 0.6 evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) self.assertTrue(evaluator_options['coco_detection_metrics'] ['include_metrics_per_category']) self.assertFalse(evaluator_options['coco_detection_metrics'] ['skip_predictions_for_unlabeled_class']) self.assertTrue( evaluator_options['coco_mask_metrics']['include_metrics_per_category']) self.assertAlmostEqual( evaluator_options['precision_at_recall_detection_metrics'] ['recall_lower_bound'], eval_config.recall_lower_bound) self.assertAlmostEqual( evaluator_options['precision_at_recall_detection_metrics'] ['recall_upper_bound'], eval_config.recall_upper_bound) self.assertFalse(evaluator_options['precision_at_recall_detection_metrics'] ['skip_predictions_for_unlabeled_class']) def test_get_evaluator_with_evaluator_options(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'precision_at_recall_detection_metrics']) eval_config.include_metrics_per_category = True eval_config.skip_predictions_for_unlabeled_class = True eval_config.recall_lower_bound = 0.2 eval_config.recall_upper_bound = 0.6 categories = self._get_categories_list() evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) evaluator = eval_util.get_evaluators(eval_config, categories, evaluator_options) self.assertTrue(evaluator[0]._include_metrics_per_category) self.assertTrue(evaluator[0]._skip_predictions_for_unlabeled_class) self.assertTrue(evaluator[1]._skip_predictions_for_unlabeled_class) self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 
eval_config.recall_lower_bound) self.assertAlmostEqual(evaluator[1]._recall_upper_bound, eval_config.recall_upper_bound) def test_get_evaluator_with_no_evaluator_options(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'precision_at_recall_detection_metrics']) eval_config.include_metrics_per_category = True eval_config.recall_lower_bound = 0.2 eval_config.recall_upper_bound = 0.6 categories = self._get_categories_list() evaluator = eval_util.get_evaluators( eval_config, categories, evaluator_options=None) # Even though we are setting eval_config.include_metrics_per_category = True # and bounds on recall, these options are never passed into the # DetectionEvaluator constructor (via `evaluator_options`). self.assertFalse(evaluator[0]._include_metrics_per_category) self.assertAlmostEqual(evaluator[1]._recall_lower_bound, 0.0) self.assertAlmostEqual(evaluator[1]._recall_upper_bound, 1.0) def test_get_evaluator_with_keypoint_metrics(self): eval_config = eval_pb2.EvalConfig() person_keypoints_metric = eval_config.parameterized_metric.add() person_keypoints_metric.coco_keypoint_metrics.class_label = 'person' person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ 'left_eye'] = 0.1 person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ 'right_eye'] = 0.2 dog_keypoints_metric = eval_config.parameterized_metric.add() dog_keypoints_metric.coco_keypoint_metrics.class_label = 'dog' dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ 'tail_start'] = 0.3 dog_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ 'mouth'] = 0.4 categories = self._get_categories_list_with_keypoints() evaluator = eval_util.get_evaluators( eval_config, categories, evaluator_options=None) # Verify keypoint evaluator class variables. 
self.assertLen(evaluator, 3) self.assertFalse(evaluator[0]._include_metrics_per_category) self.assertEqual(evaluator[1]._category_name, 'person') self.assertEqual(evaluator[2]._category_name, 'dog') self.assertAllEqual(evaluator[1]._keypoint_ids, [0, 3]) self.assertAllEqual(evaluator[2]._keypoint_ids, [1, 2]) self.assertAllClose([0.1, 0.2], evaluator[1]._oks_sigmas) self.assertAllClose([0.3, 0.4], evaluator[2]._oks_sigmas) def test_get_evaluator_with_unmatched_label(self): eval_config = eval_pb2.EvalConfig() person_keypoints_metric = eval_config.parameterized_metric.add() person_keypoints_metric.coco_keypoint_metrics.class_label = 'unmatched' person_keypoints_metric.coco_keypoint_metrics.keypoint_label_to_sigmas[ 'kpt'] = 0.1 categories = self._get_categories_list_with_keypoints() evaluator = eval_util.get_evaluators( eval_config, categories, evaluator_options=None) self.assertLen(evaluator, 1) self.assertNotIsInstance( evaluator[0], coco_evaluation.CocoKeypointEvaluator) def test_padded_image_result_dict(self): input_data_fields = fields.InputDataFields detection_fields = fields.DetectionResultFields key = tf.constant([str(i) for i in range(2)]) detection_boxes = np.array([[[0., 0., 1., 1.]], [[0.0, 0.0, 0.5, 0.5]]], dtype=np.float32) detection_keypoints = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]], dtype=np.float32) def graph_fn(): detections = { detection_fields.detection_boxes: tf.constant(detection_boxes), detection_fields.detection_scores: tf.constant([[1.], [1.]]), detection_fields.detection_classes: tf.constant([[1], [2]]), detection_fields.num_detections: tf.constant([1, 1]), detection_fields.detection_keypoints: tf.tile( tf.reshape( tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), multiples=[2, 1, 1, 1]) } gt_boxes = detection_boxes groundtruth = { input_data_fields.groundtruth_boxes: tf.constant(gt_boxes), input_data_fields.groundtruth_classes: tf.constant([[1.], [1.]]), input_data_fields.groundtruth_keypoints: tf.tile( tf.reshape( tf.constant(detection_keypoints), shape=[1, 1, 3, 2]), multiples=[2, 1, 1, 1]) } image = tf.zeros((2, 100, 100, 3), dtype=tf.float32) true_image_shapes = tf.constant([[100, 100, 3], [50, 100, 3]]) original_image_spatial_shapes = tf.constant([[200, 200], [150, 300]]) result = eval_util.result_dict_for_batched_example( image, key, detections, groundtruth, scale_to_absolute=True, true_image_shapes=true_image_shapes, original_image_spatial_shapes=original_image_spatial_shapes, max_gt_boxes=tf.constant(1)) return (result[input_data_fields.groundtruth_boxes], result[input_data_fields.groundtruth_keypoints], result[detection_fields.detection_boxes], result[detection_fields.detection_keypoints]) (gt_boxes, gt_keypoints, detection_boxes, detection_keypoints) = self.execute_cpu(graph_fn, []) self.assertAllEqual( [[[0., 0., 200., 200.]], [[0.0, 0.0, 150., 150.]]], gt_boxes) self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], [[[0., 0.], [150., 150.], [300., 300.]]]], gt_keypoints) # Predictions from the model are not scaled. 
self.assertAllEqual( [[[0., 0., 200., 200.]], [[0.0, 0.0, 75., 150.]]], detection_boxes) self.assertAllClose([[[[0., 0.], [100., 100.], [200., 200.]]], [[[0., 0.], [75., 150.], [150., 300.]]]], detection_keypoints) def test_evaluator_options_from_eval_config_no_super_categories(self): eval_config_text_proto = """ metrics_set: "coco_detection_metrics" metrics_set: "coco_mask_metrics" include_metrics_per_category: true use_moving_averages: false batch_size: 1; """ eval_config = eval_pb2.EvalConfig() text_format.Merge(eval_config_text_proto, eval_config) evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) self.assertNotIn('super_categories', evaluator_options['coco_mask_metrics']) def test_evaluator_options_from_eval_config_with_super_categories(self): eval_config_text_proto = """ metrics_set: "coco_detection_metrics" metrics_set: "coco_mask_metrics" include_metrics_per_category: true use_moving_averages: false batch_size: 1; super_categories { key: "supercat1" value: "a,b,c" } super_categories { key: "supercat2" value: "d,e,f" } """ eval_config = eval_pb2.EvalConfig() text_format.Merge(eval_config_text_proto, eval_config) evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) self.assertIn('super_categories', evaluator_options['coco_mask_metrics']) super_categories = evaluator_options[ 'coco_mask_metrics']['super_categories'] self.assertIn('supercat1', super_categories) self.assertIn('supercat2', super_categories) self.assertAllEqual(super_categories['supercat1'], ['a', 'b', 'c']) self.assertAllEqual(super_categories['supercat2'], ['d', 'e', 'f']) if __name__ == '__main__': tf.test.main()
20,372
42.812903
80
py
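As a companion to the evaluator-option tests above, here is a minimal sketch of turning an `EvalConfig` into evaluator objects via the same `eval_util` helpers; the metric set and category list are illustrative only.

from object_detection import eval_util
from object_detection.protos import eval_pb2

# Build a small EvalConfig requesting COCO detection metrics with
# per-category breakdowns (illustrative settings).
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.append('coco_detection_metrics')
eval_config.include_metrics_per_category = True

categories = [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'dog'}]

# evaluator_options_from_eval_config translates config fields into the
# keyword arguments each evaluator constructor understands.
evaluator_options = eval_util.evaluator_options_from_eval_config(eval_config)
evaluators = eval_util.get_evaluators(eval_config, categories, evaluator_options)

# Each evaluator then accumulates eval_dicts (evaluator.add_eval_dict(...))
# and reports metrics from evaluator.evaluate(), as in the tests above.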
models
models-master/research/object_detection/exporter_main_v2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Tool to export an object detection model for inference. Prepares an object detection TensorFlow graph for inference using model configuration and a trained checkpoint. Outputs associated checkpoint files, a SavedModel, and a copy of the model config. The inference graph contains one of the following input nodes, depending on the user-specified option. * `image_tensor`: Accepts a uint8 4-D tensor of shape [1, None, None, 3] * `float_image_tensor`: Accepts a float32 4-D tensor of shape [1, None, None, 3] * `encoded_image_string_tensor`: Accepts a 1-D string tensor of shape [None] containing encoded PNG or JPEG images. Image resolutions are expected to be the same if more than 1 image is provided. * `tf_example`: Accepts a 1-D string tensor of shape [None] containing serialized TFExample protos. Image resolutions are expected to be the same if more than 1 image is provided. * `image_and_boxes_tensor`: Accepts a 4-D image tensor of size [1, None, None, 3] and a boxes tensor of size [1, None, 4] of normalized bounding boxes. To be able to support this option, the model needs to implement a predict_masks_from_boxes method. See the documentation for DetectionFromImageAndBoxModule for details. and the following output nodes returned by the model.postprocess(..): * `num_detections`: Outputs float32 tensors of the form [batch] that specify the number of valid boxes per image in the batch. * `detection_boxes`: Outputs float32 tensors of the form [batch, num_boxes, 4] containing detected boxes. * `detection_scores`: Outputs float32 tensors of the form [batch, num_boxes] containing class scores for the detections. * `detection_classes`: Outputs float32 tensors of the form [batch, num_boxes] containing classes for the detections. Example Usage: -------------- python exporter_main_v2.py \ --input_type image_tensor \ --pipeline_config_path path/to/ssd_inception_v2.config \ --trained_checkpoint_dir path/to/checkpoint \ --output_directory path/to/exported_model_directory --use_side_inputs True/False \ --side_input_shapes dim_0,dim_1,...dim_a/.../dim_0,dim_1,...,dim_z \ --side_input_names name_a,name_b,...,name_c \ --side_input_types type_1,type_2 The expected output would be in the directory path/to/exported_model_directory (which is created if it does not exist) holding two subdirectories (corresponding to checkpoint and SavedModel, respectively) and a copy of the pipeline config. Config overrides (see the `config_override` flag) are text protobufs (also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override certain fields in the provided pipeline_config_path. These are useful for making small changes to the inference graph that differ from the training or eval config. 
Example Usage (in which we change the second stage post-processing score threshold to be 0.5): python exporter_main_v2.py \ --input_type image_tensor \ --pipeline_config_path path/to/ssd_inception_v2.config \ --trained_checkpoint_dir path/to/checkpoint \ --output_directory path/to/exported_model_directory \ --config_override " \ model{ \ faster_rcnn { \ second_stage_post_processing { \ batch_non_max_suppression { \ score_threshold: 0.5 \ } \ } \ } \ }" If side inputs are desired, the following arguments could be appended (the example below is for Context R-CNN). --use_side_inputs True \ --side_input_shapes 1,2000,2057/1 \ --side_input_names context_features,valid_context_size \ --side_input_types tf.float32,tf.int32 """ from absl import app from absl import flags import tensorflow.compat.v2 as tf from google.protobuf import text_format from object_detection import exporter_lib_v2 from object_detection.protos import pipeline_pb2 tf.enable_v2_behavior() FLAGS = flags.FLAGS flags.DEFINE_string('input_type', 'image_tensor', 'Type of input node. Can be ' 'one of [`image_tensor`, `encoded_image_string_tensor`, ' '`tf_example`, `float_image_tensor`, ' '`image_and_boxes_tensor`]') flags.DEFINE_string('pipeline_config_path', None, 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file.') flags.DEFINE_string('trained_checkpoint_dir', None, 'Path to trained checkpoint directory') flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string('config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') flags.DEFINE_boolean('use_side_inputs', False, 'If True, uses side inputs as well as image inputs.') flags.DEFINE_string('side_input_shapes', '', 'If use_side_inputs is True, this explicitly sets ' 'the shape of the side input tensors to a fixed size. The ' 'dimensions are to be provided as a comma-separated list ' 'of integers. A value of -1 can be used for unknown ' 'dimensions. A `/` denotes a break, starting the shape of ' 'the next side input tensor. This flag is required if ' 'using side inputs.') flags.DEFINE_string('side_input_types', '', 'If use_side_inputs is True, this explicitly sets ' 'the type of the side input tensors. The ' 'dimensions are to be provided as a comma-separated list ' 'of types, each of `string`, `integer`, or `float`. ' 'This flag is required if using side inputs.') flags.DEFINE_string('side_input_names', '', 'If use_side_inputs is True, this explicitly sets ' 'the names of the side input tensors required by the model ' 'assuming the names will be a comma-separated list of ' 'strings. This flag is required if using side inputs.') flags.mark_flag_as_required('pipeline_config_path') flags.mark_flag_as_required('trained_checkpoint_dir') flags.mark_flag_as_required('output_directory') def main(_): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: text_format.Merge(f.read(), pipeline_config) text_format.Merge(FLAGS.config_override, pipeline_config) exporter_lib_v2.export_inference_graph( FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir, FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes, FLAGS.side_input_types, FLAGS.side_input_names) if __name__ == '__main__': app.run(main)
7,577
44.927273
80
py
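For completeness, a minimal sketch of invoking the same export path programmatically rather than through the flags above; the paths are hypothetical placeholders and the side-input arguments are assumed to keep their defaults.

import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection import exporter_lib_v2
from object_detection.protos import pipeline_pb2

tf.enable_v2_behavior()

# Read the training pipeline config (hypothetical path), as main() above does.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile('/tmp/pipeline.config', 'r') as f:
  text_format.Merge(f.read(), pipeline_config)

# Export a SavedModel that accepts a uint8 `image_tensor` input.
exporter_lib_v2.export_inference_graph(
    'image_tensor',
    pipeline_config,
    '/tmp/train_dir',        # trained_checkpoint_dir (hypothetical)
    '/tmp/exported_model')   # output_directory (hypothetical)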
models
models-master/research/object_detection/exporter.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions to export object detection inference graph.""" import os import tempfile import tensorflow.compat.v1 as tf import tf_slim as slim from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.tools import freeze_graph # pylint: disable=g-direct-tensorflow-import from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.utils import config_util from object_detection.utils import shape_utils # pylint: disable=g-import-not-at-top try: from tensorflow.contrib import tfprof as contrib_tfprof from tensorflow.contrib.quantize.python import graph_matcher except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top freeze_graph_with_def_protos = freeze_graph.freeze_graph_with_def_protos def parse_side_inputs(side_input_shapes_string, side_input_names_string, side_input_types_string): """Parses side input flags. Args: side_input_shapes_string: The shape of the side input tensors, provided as a comma-separated list of integers. A value of -1 is used for unknown dimensions. A `/` denotes a break, starting the shape of the next side input tensor. side_input_names_string: The names of the side input tensors, provided as a comma-separated list of strings. side_input_types_string: The type of the side input tensors, provided as a comma-separated list of types, each of `string`, `integer`, or `float`. Returns: side_input_shapes: A list of shapes. side_input_names: A list of strings. side_input_types: A list of tensorflow dtypes. """ if side_input_shapes_string: side_input_shapes = [] for side_input_shape_list in side_input_shapes_string.split('/'): side_input_shape = [ int(dim) if dim != '-1' else None for dim in side_input_shape_list.split(',') ] side_input_shapes.append(side_input_shape) else: raise ValueError('When using side_inputs, side_input_shapes must be ' 'specified in the input flags.') if side_input_names_string: side_input_names = list(side_input_names_string.split(',')) else: raise ValueError('When using side_inputs, side_input_names must be ' 'specified in the input flags.') if side_input_types_string: typelookup = {'float': tf.float32, 'int': tf.int32, 'string': tf.string} side_input_types = [ typelookup[side_input_type] for side_input_type in side_input_types_string.split(',') ] else: raise ValueError('When using side_inputs, side_input_types must be ' 'specified in the input flags.') return side_input_shapes, side_input_names, side_input_types def rewrite_nn_resize_op(is_quantized=False): """Replaces a custom nearest-neighbor resize op with the Tensorflow version. Some graphs use this custom version for TPU-compatibility. 
Args: is_quantized: True if the default graph is quantized. """ def remove_nn(): """Remove nearest neighbor upsampling structures and replace with TF op.""" input_pattern = graph_matcher.OpTypePattern( 'FakeQuantWithMinMaxVars' if is_quantized else '*') stack_1_pattern = graph_matcher.OpTypePattern( 'Pack', inputs=[input_pattern, input_pattern], ordered_inputs=False) reshape_1_pattern = graph_matcher.OpTypePattern( 'Reshape', inputs=[stack_1_pattern, 'Const'], ordered_inputs=False) stack_2_pattern = graph_matcher.OpTypePattern( 'Pack', inputs=[reshape_1_pattern, reshape_1_pattern], ordered_inputs=False) reshape_2_pattern = graph_matcher.OpTypePattern( 'Reshape', inputs=[stack_2_pattern, 'Const'], ordered_inputs=False) consumer_pattern1 = graph_matcher.OpTypePattern( 'Add|AddV2|Max|Mul', inputs=[reshape_2_pattern, '*'], ordered_inputs=False) consumer_pattern2 = graph_matcher.OpTypePattern( 'StridedSlice', inputs=[reshape_2_pattern, '*', '*', '*'], ordered_inputs=False) def replace_matches(consumer_pattern): """Search for nearest neighbor pattern and replace with TF op.""" match_counter = 0 matcher = graph_matcher.GraphMatcher(consumer_pattern) for match in matcher.match_graph(tf.get_default_graph()): match_counter += 1 projection_op = match.get_op(input_pattern) reshape_2_op = match.get_op(reshape_2_pattern) consumer_op = match.get_op(consumer_pattern) nn_resize = tf.image.resize_nearest_neighbor( projection_op.outputs[0], reshape_2_op.outputs[0].shape.dims[1:3], align_corners=False, name=os.path.split(reshape_2_op.name)[0] + '/resize_nearest_neighbor') for index, op_input in enumerate(consumer_op.inputs): if op_input == reshape_2_op.outputs[0]: consumer_op._update_input(index, nn_resize) # pylint: disable=protected-access break return match_counter match_counter = replace_matches(consumer_pattern1) match_counter += replace_matches(consumer_pattern2) tf.logging.info('Found and fixed {} matches'.format(match_counter)) return match_counter # Applying twice because both inputs to Add could be NN pattern total_removals = 0 while remove_nn(): total_removals += 1 # This number is chosen based on the nas-fpn architecture. if total_removals > 4: raise ValueError('Graph removal encountered a infinite loop.') def replace_variable_values_with_moving_averages(graph, current_checkpoint_file, new_checkpoint_file, no_ema_collection=None): """Replaces variable values in the checkpoint with their moving averages. If the current checkpoint has shadow variables maintaining moving averages of the variables defined in the graph, this function generates a new checkpoint where the variables contain the values of their moving averages. Args: graph: a tf.Graph object. current_checkpoint_file: a checkpoint containing both original variables and their moving averages. new_checkpoint_file: file path to write a new checkpoint. no_ema_collection: A list of namescope substrings to match the variables to eliminate EMA. 
""" with graph.as_default(): variable_averages = tf.train.ExponentialMovingAverage(0.0) ema_variables_to_restore = variable_averages.variables_to_restore() ema_variables_to_restore = config_util.remove_unnecessary_ema( ema_variables_to_restore, no_ema_collection) with tf.Session() as sess: read_saver = tf.train.Saver(ema_variables_to_restore) read_saver.restore(sess, current_checkpoint_file) write_saver = tf.train.Saver() write_saver.save(sess, new_checkpoint_file) def _image_tensor_input_placeholder(input_shape=None): """Returns input placeholder and a 4-D uint8 image tensor.""" if input_shape is None: input_shape = (None, None, None, 3) input_tensor = tf.placeholder( dtype=tf.uint8, shape=input_shape, name='image_tensor') return input_tensor, input_tensor def _side_input_tensor_placeholder(side_input_shape, side_input_name, side_input_type): """Returns side input placeholder and side input tensor.""" side_input_tensor = tf.placeholder( dtype=side_input_type, shape=side_input_shape, name=side_input_name) return side_input_tensor, side_input_tensor def _tf_example_input_placeholder(input_shape=None): """Returns input that accepts a batch of strings with tf examples. Args: input_shape: the shape to resize the output decoded images to (optional). Returns: a tuple of input placeholder and the output decoded images. """ batch_tf_example_placeholder = tf.placeholder( tf.string, shape=[None], name='tf_example') def decode(tf_example_string_tensor): tensor_dict = tf_example_decoder.TfExampleDecoder().decode( tf_example_string_tensor) image_tensor = tensor_dict[fields.InputDataFields.image] if input_shape is not None: image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) return image_tensor return (batch_tf_example_placeholder, shape_utils.static_or_dynamic_map_fn( decode, elems=batch_tf_example_placeholder, dtype=tf.uint8, parallel_iterations=32, back_prop=False)) def _encoded_image_string_tensor_input_placeholder(input_shape=None): """Returns input that accepts a batch of PNG or JPEG strings. Args: input_shape: the shape to resize the output decoded images to (optional). Returns: a tuple of input placeholder and the output decoded images. """ batch_image_str_placeholder = tf.placeholder( dtype=tf.string, shape=[None], name='encoded_image_string_tensor') def decode(encoded_image_string_tensor): image_tensor = tf.image.decode_image(encoded_image_string_tensor, channels=3) image_tensor.set_shape((None, None, 3)) if input_shape is not None: image_tensor = tf.image.resize(image_tensor, input_shape[1:3]) return image_tensor return (batch_image_str_placeholder, tf.map_fn( decode, elems=batch_image_str_placeholder, dtype=tf.uint8, parallel_iterations=32, back_prop=False)) input_placeholder_fn_map = { 'image_tensor': _image_tensor_input_placeholder, 'encoded_image_string_tensor': _encoded_image_string_tensor_input_placeholder, 'tf_example': _tf_example_input_placeholder } def add_output_tensor_nodes(postprocessed_tensors, output_collection_name='inference_op'): """Adds output nodes for detection boxes and scores. Adds the following nodes for output tensors - * num_detections: float32 tensor of shape [batch_size]. * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4] containing detected boxes. * detection_scores: float32 tensor of shape [batch_size, num_boxes] containing scores for the detected boxes. 
* detection_multiclass_scores: (Optional) float32 tensor of shape [batch_size, num_boxes, num_classes_with_background] for containing class score distribution for detected boxes including background if any. * detection_features: (Optional) float32 tensor of shape [batch, num_boxes, roi_height, roi_width, depth] containing classifier features for each detected box * detection_classes: float32 tensor of shape [batch_size, num_boxes] containing class predictions for the detected boxes. * detection_keypoints: (Optional) float32 tensor of shape [batch_size, num_boxes, num_keypoints, 2] containing keypoints for each detection box. * detection_masks: (Optional) float32 tensor of shape [batch_size, num_boxes, mask_height, mask_width] containing masks for each detection box. Args: postprocessed_tensors: a dictionary containing the following fields 'detection_boxes': [batch, max_detections, 4] 'detection_scores': [batch, max_detections] 'detection_multiclass_scores': [batch, max_detections, num_classes_with_background] 'detection_features': [batch, num_boxes, roi_height, roi_width, depth] 'detection_classes': [batch, max_detections] 'detection_masks': [batch, max_detections, mask_height, mask_width] (optional). 'detection_keypoints': [batch, max_detections, num_keypoints, 2] (optional). 'num_detections': [batch] output_collection_name: Name of collection to add output tensors to. Returns: A tensor dict containing the added output tensor nodes. """ detection_fields = fields.DetectionResultFields label_id_offset = 1 boxes = postprocessed_tensors.get(detection_fields.detection_boxes) scores = postprocessed_tensors.get(detection_fields.detection_scores) multiclass_scores = postprocessed_tensors.get( detection_fields.detection_multiclass_scores) box_classifier_features = postprocessed_tensors.get( detection_fields.detection_features) raw_boxes = postprocessed_tensors.get(detection_fields.raw_detection_boxes) raw_scores = postprocessed_tensors.get(detection_fields.raw_detection_scores) classes = postprocessed_tensors.get( detection_fields.detection_classes) + label_id_offset keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints) masks = postprocessed_tensors.get(detection_fields.detection_masks) num_detections = postprocessed_tensors.get(detection_fields.num_detections) outputs = {} outputs[detection_fields.detection_boxes] = tf.identity( boxes, name=detection_fields.detection_boxes) outputs[detection_fields.detection_scores] = tf.identity( scores, name=detection_fields.detection_scores) if multiclass_scores is not None: outputs[detection_fields.detection_multiclass_scores] = tf.identity( multiclass_scores, name=detection_fields.detection_multiclass_scores) if box_classifier_features is not None: outputs[detection_fields.detection_features] = tf.identity( box_classifier_features, name=detection_fields.detection_features) outputs[detection_fields.detection_classes] = tf.identity( classes, name=detection_fields.detection_classes) outputs[detection_fields.num_detections] = tf.identity( num_detections, name=detection_fields.num_detections) if raw_boxes is not None: outputs[detection_fields.raw_detection_boxes] = tf.identity( raw_boxes, name=detection_fields.raw_detection_boxes) if raw_scores is not None: outputs[detection_fields.raw_detection_scores] = tf.identity( raw_scores, name=detection_fields.raw_detection_scores) if keypoints is not None: outputs[detection_fields.detection_keypoints] = tf.identity( keypoints, name=detection_fields.detection_keypoints) if masks is not None: 
outputs[detection_fields.detection_masks] = tf.identity( masks, name=detection_fields.detection_masks) for output_key in outputs: tf.add_to_collection(output_collection_name, outputs[output_key]) return outputs def write_saved_model(saved_model_path, frozen_graph_def, inputs, outputs): """Writes SavedModel to disk. If checkpoint_path is not None bakes the weights into the graph thereby eliminating the need of checkpoint files during inference. If the model was trained with moving averages, setting use_moving_averages to true restores the moving averages, otherwise the original set of variables is restored. Args: saved_model_path: Path to write SavedModel. frozen_graph_def: tf.GraphDef holding frozen graph. inputs: A tensor dictionary containing the inputs to a DetectionModel. outputs: A tensor dictionary containing the outputs of a DetectionModel. """ with tf.Graph().as_default(): with tf.Session() as sess: tf.import_graph_def(frozen_graph_def, name='') builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path) tensor_info_inputs = {} if isinstance(inputs, dict): for k, v in inputs.items(): tensor_info_inputs[k] = tf.saved_model.utils.build_tensor_info(v) else: tensor_info_inputs['inputs'] = tf.saved_model.utils.build_tensor_info( inputs) tensor_info_outputs = {} for k, v in outputs.items(): tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v) detection_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ tf.saved_model.signature_constants .DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature, }, ) builder.save() def write_graph_and_checkpoint(inference_graph_def, model_path, input_saver_def, trained_checkpoint_prefix): """Writes the graph and the checkpoint into disk.""" for node in inference_graph_def.node: node.device = '' with tf.Graph().as_default(): tf.import_graph_def(inference_graph_def, name='') with tf.Session() as sess: saver = tf.train.Saver( saver_def=input_saver_def, save_relative_paths=True) saver.restore(sess, trained_checkpoint_prefix) saver.save(sess, model_path) def _get_outputs_from_inputs(input_tensors, detection_model, output_collection_name, **side_inputs): inputs = tf.cast(input_tensors, dtype=tf.float32) preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs) output_tensors = detection_model.predict( preprocessed_inputs, true_image_shapes, **side_inputs) postprocessed_tensors = detection_model.postprocess( output_tensors, true_image_shapes) return add_output_tensor_nodes(postprocessed_tensors, output_collection_name) def build_detection_graph(input_type, detection_model, input_shape, output_collection_name, graph_hook_fn, use_side_inputs=False, side_input_shapes=None, side_input_names=None, side_input_types=None): """Build the detection graph.""" if input_type not in input_placeholder_fn_map: raise ValueError('Unknown input type: {}'.format(input_type)) placeholder_args = {} side_inputs = {} if input_shape is not None: if (input_type != 'image_tensor' and input_type != 'encoded_image_string_tensor' and input_type != 'tf_example' and input_type != 'tf_sequence_example'): raise ValueError('Can only specify input shape for `image_tensor`, ' '`encoded_image_string_tensor`, `tf_example`, ' ' or `tf_sequence_example` inputs.') placeholder_args['input_shape'] = input_shape 
placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type]( **placeholder_args) placeholder_tensors = {'inputs': placeholder_tensor} if use_side_inputs: for idx, side_input_name in enumerate(side_input_names): side_input_placeholder, side_input = _side_input_tensor_placeholder( side_input_shapes[idx], side_input_name, side_input_types[idx]) print(side_input) side_inputs[side_input_name] = side_input placeholder_tensors[side_input_name] = side_input_placeholder outputs = _get_outputs_from_inputs( input_tensors=input_tensors, detection_model=detection_model, output_collection_name=output_collection_name, **side_inputs) # Add global step to the graph. slim.get_or_create_global_step() if graph_hook_fn: graph_hook_fn() return outputs, placeholder_tensors def _export_inference_graph(input_type, detection_model, use_moving_averages, trained_checkpoint_prefix, output_directory, additional_output_tensor_names=None, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None, write_inference_graph=False, temp_checkpoint_prefix='', use_side_inputs=False, side_input_shapes=None, side_input_names=None, side_input_types=None): """Export helper.""" tf.gfile.MakeDirs(output_directory) frozen_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') saved_model_path = os.path.join(output_directory, 'saved_model') model_path = os.path.join(output_directory, 'model.ckpt') outputs, placeholder_tensor_dict = build_detection_graph( input_type=input_type, detection_model=detection_model, input_shape=input_shape, output_collection_name=output_collection_name, graph_hook_fn=graph_hook_fn, use_side_inputs=use_side_inputs, side_input_shapes=side_input_shapes, side_input_names=side_input_names, side_input_types=side_input_types) profile_inference_graph(tf.get_default_graph()) saver_kwargs = {} if use_moving_averages: if not temp_checkpoint_prefix: # This check is to be compatible with both version of SaverDef. 
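      # tf.train.Saver V1 stores a checkpoint as a single file at the prefix
      # path, while V2 stores multiple index/shard files under the prefix, so
      # a temporary file name is used for V1 and a temporary directory for V2.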
if os.path.isfile(trained_checkpoint_prefix): saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name else: temp_checkpoint_prefix = tempfile.mkdtemp() replace_variable_values_with_moving_averages( tf.get_default_graph(), trained_checkpoint_prefix, temp_checkpoint_prefix) checkpoint_to_use = temp_checkpoint_prefix else: checkpoint_to_use = trained_checkpoint_prefix saver = tf.train.Saver(**saver_kwargs) input_saver_def = saver.as_saver_def() write_graph_and_checkpoint( inference_graph_def=tf.get_default_graph().as_graph_def(), model_path=model_path, input_saver_def=input_saver_def, trained_checkpoint_prefix=checkpoint_to_use) if write_inference_graph: inference_graph_def = tf.get_default_graph().as_graph_def() inference_graph_path = os.path.join(output_directory, 'inference_graph.pbtxt') for node in inference_graph_def.node: node.device = '' with tf.gfile.GFile(inference_graph_path, 'wb') as f: f.write(str(inference_graph_def)) if additional_output_tensor_names is not None: output_node_names = ','.join(list(outputs.keys())+( additional_output_tensor_names)) else: output_node_names = ','.join(outputs.keys()) frozen_graph_def = freeze_graph.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=checkpoint_to_use, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph=frozen_graph_path, clear_devices=True, initializer_nodes='') write_saved_model(saved_model_path, frozen_graph_def, placeholder_tensor_dict, outputs) def export_inference_graph(input_type, pipeline_config, trained_checkpoint_prefix, output_directory, input_shape=None, output_collection_name='inference_op', additional_output_tensor_names=None, write_inference_graph=False, use_side_inputs=False, side_input_shapes=None, side_input_names=None, side_input_types=None): """Exports inference graph for the model specified in the pipeline config. Args: input_type: Type of input for the graph. Can be one of ['image_tensor', 'encoded_image_string_tensor', 'tf_example']. pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. trained_checkpoint_prefix: Path to the trained checkpoint file. output_directory: Path to write outputs. input_shape: Sets a fixed shape for an `image_tensor` input. If not specified, will default to [None, None, None, 3]. output_collection_name: Name of collection to add output tensors to. If None, does not add output tensors to a collection. additional_output_tensor_names: list of additional output tensors to include in the frozen graph. write_inference_graph: If true, writes inference graph to disk. use_side_inputs: If True, the model requires side_inputs. side_input_shapes: List of shapes of the side input tensors, required if use_side_inputs is True. side_input_names: List of names of the side input tensors, required if use_side_inputs is True. side_input_types: List of types of the side input tensors, required if use_side_inputs is True. 
""" detection_model = model_builder.build(pipeline_config.model, is_training=False) graph_rewriter_fn = None if pipeline_config.HasField('graph_rewriter'): graph_rewriter_config = pipeline_config.graph_rewriter graph_rewriter_fn = graph_rewriter_builder.build(graph_rewriter_config, is_training=False) _export_inference_graph( input_type, detection_model, pipeline_config.eval_config.use_moving_averages, trained_checkpoint_prefix, output_directory, additional_output_tensor_names, input_shape, output_collection_name, graph_hook_fn=graph_rewriter_fn, write_inference_graph=write_inference_graph, use_side_inputs=use_side_inputs, side_input_shapes=side_input_shapes, side_input_names=side_input_names, side_input_types=side_input_types) pipeline_config.eval_config.use_moving_averages = False config_util.save_pipeline_config(pipeline_config, output_directory) def profile_inference_graph(graph): """Profiles the inference graph. Prints model parameters and computation FLOPs given an inference graph. BatchNorms are excluded from the parameter count due to the fact that BatchNorms are usually folded. BatchNorm, Initializer, Regularizer and BiasAdd are not considered in FLOP count. Args: graph: the inference graph. """ tfprof_vars_option = ( contrib_tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS) tfprof_flops_option = contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS # Batchnorm is usually folded during inference. tfprof_vars_option['trim_name_regexes'] = ['.*BatchNorm.*'] # Initializer and Regularizer are only used in training. tfprof_flops_option['trim_name_regexes'] = [ '.*BatchNorm.*', '.*Initializer.*', '.*Regularizer.*', '.*BiasAdd.*' ] contrib_tfprof.model_analyzer.print_model_analysis( graph, tfprof_options=tfprof_vars_option) contrib_tfprof.model_analyzer.print_model_analysis( graph, tfprof_options=tfprof_flops_option)
27823
40.903614
94
py
models
models-master/research/object_detection/eval_util.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common utility functions for evaluation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import re import time import numpy as np from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import standard_fields as fields from object_detection.metrics import coco_evaluation from object_detection.metrics import lvis_evaluation from object_detection.protos import eval_pb2 from object_detection.utils import label_map_util from object_detection.utils import object_detection_evaluation from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import visualization_utils as vis_utils EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics' # A dictionary of metric names to classes that implement the metric. The classes # in the dictionary must implement # utils.object_detection_evaluation.DetectionEvaluator interface. EVAL_METRICS_CLASS_DICT = { 'coco_detection_metrics': coco_evaluation.CocoDetectionEvaluator, 'coco_keypoint_metrics': coco_evaluation.CocoKeypointEvaluator, 'coco_mask_metrics': coco_evaluation.CocoMaskEvaluator, 'coco_panoptic_metrics': coco_evaluation.CocoPanopticSegmentationEvaluator, 'lvis_mask_metrics': lvis_evaluation.LVISMaskEvaluator, 'oid_challenge_detection_metrics': object_detection_evaluation.OpenImagesDetectionChallengeEvaluator, 'oid_challenge_segmentation_metrics': object_detection_evaluation .OpenImagesInstanceSegmentationChallengeEvaluator, 'pascal_voc_detection_metrics': object_detection_evaluation.PascalDetectionEvaluator, 'weighted_pascal_voc_detection_metrics': object_detection_evaluation.WeightedPascalDetectionEvaluator, 'precision_at_recall_detection_metrics': object_detection_evaluation.PrecisionAtRecallDetectionEvaluator, 'pascal_voc_instance_segmentation_metrics': object_detection_evaluation.PascalInstanceSegmentationEvaluator, 'weighted_pascal_voc_instance_segmentation_metrics': object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator, 'oid_V2_detection_metrics': object_detection_evaluation.OpenImagesDetectionEvaluator, } EVAL_DEFAULT_METRIC = 'coco_detection_metrics' def write_metrics(metrics, global_step, summary_dir): """Write metrics to a summary directory. Args: metrics: A dictionary containing metric names and values. global_step: Global step at which the metrics are computed. summary_dir: Directory to write tensorflow summaries to. 
""" tf.logging.info('Writing metrics to tf summary.') summary_writer = tf.summary.FileWriterCache.get(summary_dir) for key in sorted(metrics): summary = tf.Summary(value=[ tf.Summary.Value(tag=key, simple_value=metrics[key]), ]) summary_writer.add_summary(summary, global_step) tf.logging.info('%s: %f', key, metrics[key]) tf.logging.info('Metrics written to tf summary.') # TODO(rathodv): Add tests. def visualize_detection_results(result_dict, tag, global_step, categories, summary_dir='', export_dir='', agnostic_mode=False, show_groundtruth=False, groundtruth_box_visualization_color='black', min_score_thresh=.5, max_num_predictions=20, skip_scores=False, skip_labels=False, keep_image_id_for_visualization_export=False): """Visualizes detection results and writes visualizations to image summaries. This function visualizes an image with its detected bounding boxes and writes to image summaries which can be viewed on tensorboard. It optionally also writes images to a directory. In the case of missing entry in the label map, unknown class name in the visualization is shown as "N/A". Args: result_dict: a dictionary holding groundtruth and detection data corresponding to each image being evaluated. The following keys are required: 'original_image': a numpy array representing the image with shape [1, height, width, 3] or [1, height, width, 1] 'detection_boxes': a numpy array of shape [N, 4] 'detection_scores': a numpy array of shape [N] 'detection_classes': a numpy array of shape [N] The following keys are optional: 'groundtruth_boxes': a numpy array of shape [N, 4] 'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2] Detections are assumed to be provided in decreasing order of score and for display, and we assume that scores are probabilities between 0 and 1. tag: tensorboard tag (string) to associate with image. global_step: global step at which the visualization are generated. categories: a list of dictionaries representing all possible categories. Each dict in this list has the following keys: 'id': (required) an integer id uniquely identifying this category 'name': (required) string representing category name e.g., 'cat', 'dog', 'pizza' 'supercategory': (optional) string representing the supercategory e.g., 'animal', 'vehicle', 'food', etc summary_dir: the output directory to which the image summaries are written. export_dir: the output directory to which images are written. If this is empty (default), then images are not exported. agnostic_mode: boolean (default: False) controlling whether to evaluate in class-agnostic mode or not. 
show_groundtruth: boolean (default: False) controlling whether to show groundtruth boxes in addition to detected boxes groundtruth_box_visualization_color: box color for visualizing groundtruth boxes min_score_thresh: minimum score threshold for a box to be visualized max_num_predictions: maximum number of detections to visualize skip_scores: whether to skip score when drawing a single detection skip_labels: whether to skip label when drawing a single detection keep_image_id_for_visualization_export: whether to keep image identifier in filename when exported to export_dir Raises: ValueError: if result_dict does not contain the expected keys (i.e., 'original_image', 'detection_boxes', 'detection_scores', 'detection_classes') """ detection_fields = fields.DetectionResultFields input_fields = fields.InputDataFields if not set([ input_fields.original_image, detection_fields.detection_boxes, detection_fields.detection_scores, detection_fields.detection_classes, ]).issubset(set(result_dict.keys())): raise ValueError('result_dict does not contain all expected keys.') if show_groundtruth and input_fields.groundtruth_boxes not in result_dict: raise ValueError('If show_groundtruth is enabled, result_dict must contain ' 'groundtruth_boxes.') tf.logging.info('Creating detection visualizations.') category_index = label_map_util.create_category_index(categories) image = np.squeeze(result_dict[input_fields.original_image], axis=0) if image.shape[2] == 1: # If one channel image, repeat in RGB. image = np.tile(image, [1, 1, 3]) detection_boxes = result_dict[detection_fields.detection_boxes] detection_scores = result_dict[detection_fields.detection_scores] detection_classes = np.int32((result_dict[ detection_fields.detection_classes])) detection_keypoints = result_dict.get(detection_fields.detection_keypoints) detection_masks = result_dict.get(detection_fields.detection_masks) detection_boundaries = result_dict.get(detection_fields.detection_boundaries) # Plot groundtruth underneath detections if show_groundtruth: groundtruth_boxes = result_dict[input_fields.groundtruth_boxes] groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints) vis_utils.visualize_boxes_and_labels_on_image_array( image=image, boxes=groundtruth_boxes, classes=None, scores=None, category_index=category_index, keypoints=groundtruth_keypoints, use_normalized_coordinates=False, max_boxes_to_draw=None, groundtruth_box_visualization_color=groundtruth_box_visualization_color) vis_utils.visualize_boxes_and_labels_on_image_array( image, detection_boxes, detection_classes, detection_scores, category_index, instance_masks=detection_masks, instance_boundaries=detection_boundaries, keypoints=detection_keypoints, use_normalized_coordinates=False, max_boxes_to_draw=max_num_predictions, min_score_thresh=min_score_thresh, agnostic_mode=agnostic_mode, skip_scores=skip_scores, skip_labels=skip_labels) if export_dir: if keep_image_id_for_visualization_export and result_dict[fields. 
InputDataFields() .key]: export_path = os.path.join(export_dir, 'export-{}-{}.png'.format( tag, result_dict[fields.InputDataFields().key])) else: export_path = os.path.join(export_dir, 'export-{}.png'.format(tag)) vis_utils.save_image_array_as_png(image, export_path) summary = tf.Summary(value=[ tf.Summary.Value( tag=tag, image=tf.Summary.Image( encoded_image_string=vis_utils.encode_image_array_as_png_str( image))) ]) summary_writer = tf.summary.FileWriterCache.get(summary_dir) summary_writer.add_summary(summary, global_step) tf.logging.info('Detection visualizations written to summary with tag %s.', tag) def _run_checkpoint_once(tensor_dict, evaluators=None, batch_processor=None, checkpoint_dirs=None, variables_to_restore=None, restore_fn=None, num_batches=1, master='', save_graph=False, save_graph_dir='', losses_dict=None, eval_export_path=None, process_metrics_fn=None): """Evaluates metrics defined in evaluators and returns summaries. This function loads the latest checkpoint in checkpoint_dirs and evaluates all metrics defined in evaluators. The metrics are processed in batch by the batch_processor. Args: tensor_dict: a dictionary holding tensors representing a batch of detections and corresponding groundtruth annotations. evaluators: a list of object of type DetectionEvaluator to be used for evaluation. Note that the metric names produced by different evaluators must be unique. batch_processor: a function taking four arguments: 1. tensor_dict: the same tensor_dict that is passed in as the first argument to this function. 2. sess: a tensorflow session 3. batch_index: an integer representing the index of the batch amongst all batches By default, batch_processor is None, which defaults to running: return sess.run(tensor_dict) To skip an image, it suffices to return an empty dictionary in place of result_dict. checkpoint_dirs: list of directories to load into an EnsembleModel. If it has only one directory, EnsembleModel will not be used -- a DetectionModel will be instantiated directly. Not used if restore_fn is set. variables_to_restore: None, or a dictionary mapping variable names found in a checkpoint to model variables. The dictionary would normally be generated by creating a tf.train.ExponentialMovingAverage object and calling its variables_to_restore() method. Not used if restore_fn is set. restore_fn: None, or a function that takes a tf.Session object and correctly restores all necessary variables from the correct checkpoint file. If None, attempts to restore from the first directory in checkpoint_dirs. num_batches: the number of batches to use for evaluation. master: the location of the Tensorflow session. save_graph: whether or not the Tensorflow graph is stored as a pbtxt file. save_graph_dir: where to store the Tensorflow graph on disk. If save_graph is True this must be non-empty. losses_dict: optional dictionary of scalar detection losses. eval_export_path: Path for saving a json file that contains the detection results in json format. process_metrics_fn: a callback called with evaluation results after each evaluation is done. It could be used e.g. to back up checkpoints with best evaluation scores, or to call an external system to update evaluation results in order to drive best hyper-parameter search. Parameters are: int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, str checkpoint_file path. Returns: global_step: the count of global steps. all_evaluator_metrics: A dictionary containing metric names and values. 
Raises: ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least one element. ValueError: if save_graph is True and save_graph_dir is not defined. """ if save_graph and not save_graph_dir: raise ValueError('`save_graph_dir` must be defined.') sess = tf.Session(master, graph=tf.get_default_graph()) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) sess.run(tf.tables_initializer()) checkpoint_file = None if restore_fn: restore_fn(sess) else: if not checkpoint_dirs: raise ValueError('`checkpoint_dirs` must have at least one entry.') checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0]) saver = tf.train.Saver(variables_to_restore) saver.restore(sess, checkpoint_file) if save_graph: tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt') counters = {'skipped': 0, 'success': 0} aggregate_result_losses_dict = collections.defaultdict(list) with slim.queues.QueueRunners(sess): try: for batch in range(int(num_batches)): if (batch + 1) % 100 == 0: tf.logging.info('Running eval ops batch %d/%d', batch + 1, num_batches) if not batch_processor: try: if not losses_dict: losses_dict = {} result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict]) counters['success'] += 1 except tf.errors.InvalidArgumentError: tf.logging.info('Skipping image') counters['skipped'] += 1 result_dict = {} else: result_dict, result_losses_dict = batch_processor( tensor_dict, sess, batch, counters, losses_dict=losses_dict) if not result_dict: continue for key, value in iter(result_losses_dict.items()): aggregate_result_losses_dict[key].append(value) for evaluator in evaluators: # TODO(b/65130867): Use image_id tensor once we fix the input data # decoders to return correct image_id. # TODO(akuznetsa): result_dict contains batches of images, while # add_single_ground_truth_image_info expects a single image. Fix if (isinstance(result_dict, dict) and fields.InputDataFields.key in result_dict and result_dict[fields.InputDataFields.key]): image_id = result_dict[fields.InputDataFields.key] else: image_id = batch evaluator.add_single_ground_truth_image_info( image_id=image_id, groundtruth_dict=result_dict) evaluator.add_single_detected_image_info( image_id=image_id, detections_dict=result_dict) tf.logging.info('Running eval batches done.') except tf.errors.OutOfRangeError: tf.logging.info('Done evaluating -- epoch limit reached') finally: # When done, ask the threads to stop. 
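      # The counter logging and metric aggregation below run inside the
      # `finally` clause, so results are still reported if the batch loop
      # above ends early (e.g. on tf.errors.OutOfRangeError).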
tf.logging.info('# success: %d', counters['success']) tf.logging.info('# skipped: %d', counters['skipped']) all_evaluator_metrics = {} if eval_export_path and eval_export_path is not None: for evaluator in evaluators: if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)): tf.logging.info('Started dumping to json file.') evaluator.dump_detections_to_json_file( json_output_path=eval_export_path) tf.logging.info('Finished dumping to json file.') for evaluator in evaluators: metrics = evaluator.evaluate() evaluator.clear() if any(key in all_evaluator_metrics for key in metrics): raise ValueError('Metric names between evaluators must not collide.') all_evaluator_metrics.update(metrics) global_step = tf.train.global_step(sess, tf.train.get_global_step()) for key, value in iter(aggregate_result_losses_dict.items()): all_evaluator_metrics['Losses/' + key] = np.mean(value) if process_metrics_fn and checkpoint_file: m = re.search(r'model.ckpt-(\d+)$', checkpoint_file) if not m: tf.logging.error('Failed to parse checkpoint number from: %s', checkpoint_file) else: checkpoint_number = int(m.group(1)) process_metrics_fn(checkpoint_number, all_evaluator_metrics, checkpoint_file) sess.close() return (global_step, all_evaluator_metrics) # TODO(rathodv): Add tests. def repeated_checkpoint_run(tensor_dict, summary_dir, evaluators, batch_processor=None, checkpoint_dirs=None, variables_to_restore=None, restore_fn=None, num_batches=1, eval_interval_secs=120, max_number_of_evaluations=None, max_evaluation_global_step=None, master='', save_graph=False, save_graph_dir='', losses_dict=None, eval_export_path=None, process_metrics_fn=None): """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn. This function repeatedly loads a checkpoint and evaluates a desired set of tensors (provided by tensor_dict) and hands the resulting numpy arrays to a function result_processor which can be used to further process/save/visualize the results. Args: tensor_dict: a dictionary holding tensors representing a batch of detections and corresponding groundtruth annotations. summary_dir: a directory to write metrics summaries. evaluators: a list of object of type DetectionEvaluator to be used for evaluation. Note that the metric names produced by different evaluators must be unique. batch_processor: a function taking three arguments: 1. tensor_dict: the same tensor_dict that is passed in as the first argument to this function. 2. sess: a tensorflow session 3. batch_index: an integer representing the index of the batch amongst all batches By default, batch_processor is None, which defaults to running: return sess.run(tensor_dict) checkpoint_dirs: list of directories to load into a DetectionModel or an EnsembleModel if restore_fn isn't set. Also used to determine when to run next evaluation. Must have at least one element. variables_to_restore: None, or a dictionary mapping variable names found in a checkpoint to model variables. The dictionary would normally be generated by creating a tf.train.ExponentialMovingAverage object and calling its variables_to_restore() method. Not used if restore_fn is set. restore_fn: a function that takes a tf.Session object and correctly restores all necessary variables from the correct checkpoint file. num_batches: the number of batches to use for evaluation. eval_interval_secs: the number of seconds between each evaluation run. max_number_of_evaluations: the max number of iterations of the evaluation. 
If the value is left as None the evaluation continues indefinitely. max_evaluation_global_step: global step when evaluation stops. master: the location of the Tensorflow session. save_graph: whether or not the Tensorflow graph is saved as a pbtxt file. save_graph_dir: where to save on disk the Tensorflow graph. If store_graph is True this must be non-empty. losses_dict: optional dictionary of scalar detection losses. eval_export_path: Path for saving a json file that contains the detection results in json format. process_metrics_fn: a callback called with evaluation results after each evaluation is done. It could be used e.g. to back up checkpoints with best evaluation scores, or to call an external system to update evaluation results in order to drive best hyper-parameter search. Parameters are: int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics, str checkpoint_file path. Returns: metrics: A dictionary containing metric names and values in the latest evaluation. Raises: ValueError: if max_num_of_evaluations is not None or a positive number. ValueError: if checkpoint_dirs doesn't have at least one element. """ if max_number_of_evaluations and max_number_of_evaluations <= 0: raise ValueError( '`max_number_of_evaluations` must be either None or a positive number.') if max_evaluation_global_step and max_evaluation_global_step <= 0: raise ValueError( '`max_evaluation_global_step` must be either None or positive.') if not checkpoint_dirs: raise ValueError('`checkpoint_dirs` must have at least one entry.') last_evaluated_model_path = None number_of_evaluations = 0 while True: start = time.time() tf.logging.info('Starting evaluation at ' + time.strftime( '%Y-%m-%d-%H:%M:%S', time.gmtime())) model_path = tf.train.latest_checkpoint(checkpoint_dirs[0]) if not model_path: tf.logging.info('No model found in %s. Will try again in %d seconds', checkpoint_dirs[0], eval_interval_secs) elif model_path == last_evaluated_model_path: tf.logging.info('Found already evaluated checkpoint. Will try again in ' '%d seconds', eval_interval_secs) else: last_evaluated_model_path = model_path global_step, metrics = _run_checkpoint_once( tensor_dict, evaluators, batch_processor, checkpoint_dirs, variables_to_restore, restore_fn, num_batches, master, save_graph, save_graph_dir, losses_dict=losses_dict, eval_export_path=eval_export_path, process_metrics_fn=process_metrics_fn) write_metrics(metrics, global_step, summary_dir) if (max_evaluation_global_step and global_step >= max_evaluation_global_step): tf.logging.info('Finished evaluation!') break number_of_evaluations += 1 if (max_number_of_evaluations and number_of_evaluations >= max_number_of_evaluations): tf.logging.info('Finished evaluation!') break time_to_next_eval = start + eval_interval_secs - time.time() if time_to_next_eval > 0: time.sleep(time_to_next_eval) return metrics def _scale_box_to_absolute(args): boxes, image_shape = args return box_list_ops.to_absolute_coordinates( box_list.BoxList(boxes), image_shape[0], image_shape[1]).get() def _resize_detection_masks(arg_tuple): """Resizes detection masks. Args: arg_tuple: A (detection_boxes, detection_masks, image_shape, pad_shape) tuple where detection_boxes is a tf.float32 tensor of size [num_masks, 4] containing the box corners. Row i contains [ymin, xmin, ymax, xmax] of the box corresponding to mask i. Note that the box corners are in normalized coordinates. detection_masks is a tensor of size [num_masks, mask_height, mask_width]. 
image_shape is a tensor of shape [2] pad_shape is a tensor of shape [2] --- this is assumed to be greater than or equal to image_shape along both dimensions and represents a shape to-be-padded-to. Returns: """ detection_boxes, detection_masks, image_shape, pad_shape = arg_tuple detection_masks_reframed = ops.reframe_box_masks_to_image_masks( detection_masks, detection_boxes, image_shape[0], image_shape[1]) pad_instance_dim = tf.zeros([3, 1], dtype=tf.int32) pad_hw_dim = tf.concat([tf.zeros([1], dtype=tf.int32), pad_shape - image_shape], axis=0) pad_hw_dim = tf.expand_dims(pad_hw_dim, 1) paddings = tf.concat([pad_instance_dim, pad_hw_dim], axis=1) detection_masks_reframed = tf.pad(detection_masks_reframed, paddings) # If the masks are currently float, binarize them. Otherwise keep them as # integers, since they have already been thresholded. if detection_masks_reframed.dtype == tf.float32: detection_masks_reframed = tf.greater(detection_masks_reframed, 0.5) return tf.cast(detection_masks_reframed, tf.uint8) def resize_detection_masks(detection_boxes, detection_masks, original_image_spatial_shapes): """Resizes per-box detection masks to be relative to the entire image. Note that this function only works when the spatial size of all images in the batch is the same. If not, this function should be used with batch_size=1. Args: detection_boxes: A [batch_size, num_instances, 4] float tensor containing bounding boxes. detection_masks: A [batch_size, num_instances, height, width] float tensor containing binary instance masks per box. original_image_spatial_shapes: a [batch_size, 3] shaped int tensor holding the spatial dimensions of each image in the batch. Returns: masks: Masks resized to the spatial extents given by (original_image_spatial_shapes[0, 0], original_image_spatial_shapes[0, 1]) """ # modify original image spatial shapes to be max along each dim # in evaluator, should have access to original_image_spatial_shape field # in add_Eval_Dict max_spatial_shape = tf.reduce_max( original_image_spatial_shapes, axis=0, keep_dims=True) tiled_max_spatial_shape = tf.tile( max_spatial_shape, multiples=[tf.shape(original_image_spatial_shapes)[0], 1]) return shape_utils.static_or_dynamic_map_fn( _resize_detection_masks, elems=[detection_boxes, detection_masks, original_image_spatial_shapes, tiled_max_spatial_shape], dtype=tf.uint8) def _resize_groundtruth_masks(args): """Resizes groundtruth masks to the original image size.""" mask, true_image_shape, original_image_shape, pad_shape = args true_height = true_image_shape[0] true_width = true_image_shape[1] mask = mask[:, :true_height, :true_width] mask = tf.expand_dims(mask, 3) mask = tf.image.resize_images( mask, original_image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True) paddings = tf.concat( [tf.zeros([3, 1], dtype=tf.int32), tf.expand_dims( tf.concat([tf.zeros([1], dtype=tf.int32), pad_shape-original_image_shape], axis=0), 1)], axis=1) mask = tf.pad(tf.squeeze(mask, 3), paddings) return tf.cast(mask, tf.uint8) def _resize_surface_coordinate_masks(args): detection_boxes, surface_coords, image_shape = args surface_coords_v, surface_coords_u = tf.unstack(surface_coords, axis=-1) surface_coords_v_reframed = ops.reframe_box_masks_to_image_masks( surface_coords_v, detection_boxes, image_shape[0], image_shape[1]) surface_coords_u_reframed = ops.reframe_box_masks_to_image_masks( surface_coords_u, detection_boxes, image_shape[0], image_shape[1]) return tf.stack([surface_coords_v_reframed, surface_coords_u_reframed], axis=-1) def 
_scale_keypoint_to_absolute(args): keypoints, image_shape = args return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1]) def result_dict_for_single_example(image, key, detections, groundtruth=None, class_agnostic=False, scale_to_absolute=False): """Merges all detection and groundtruth information for a single example. Note that evaluation tools require classes that are 1-indexed, and so this function performs the offset. If `class_agnostic` is True, all output classes have label 1. Args: image: A single 4D uint8 image tensor of shape [1, H, W, C]. key: A single string tensor identifying the image. detections: A dictionary of detections, returned from DetectionModel.postprocess(). groundtruth: (Optional) Dictionary of groundtruth items, with fields: 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in normalized coordinates. 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) 'groundtruth_instance_masks': 3D int64 tensor of instance masks (Optional). 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with keypoints (Optional). class_agnostic: Boolean indicating whether the detections are class-agnostic (i.e. binary). Default False. scale_to_absolute: Boolean indicating whether boxes and keypoints should be scaled to absolute coordinates. Note that for IoU based evaluations, it does not matter whether boxes are expressed in absolute or relative coordinates. Default False. Returns: A dictionary with: 'original_image': A [1, H, W, C] uint8 image tensor. 'key': A string tensor with image identifier. 'detection_boxes': [max_detections, 4] float32 tensor of boxes, in normalized or absolute coordinates, depending on the value of `scale_to_absolute`. 'detection_scores': [max_detections] float32 tensor of scores. 'detection_classes': [max_detections] int64 tensor of 1-indexed classes. 'detection_masks': [max_detections, H, W] float32 tensor of binarized masks, reframed to full image masks. 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in normalized or absolute coordinates, depending on the value of `scale_to_absolute`. (Optional) 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes. (Optional) 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional) 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional) 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional) 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional) 'groundtruth_instance_masks': 3D int64 tensor of instance masks (Optional). 'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with keypoints (Optional). """ if groundtruth: max_gt_boxes = tf.shape( groundtruth[fields.InputDataFields.groundtruth_boxes])[0] for gt_key in groundtruth: # expand groundtruth dict along the batch dimension. 
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0) for detection_key in detections: detections[detection_key] = tf.expand_dims( detections[detection_key][0], axis=0) batched_output_dict = result_dict_for_batched_example( image, tf.expand_dims(key, 0), detections, groundtruth, class_agnostic, scale_to_absolute, max_gt_boxes=max_gt_boxes) exclude_keys = [ fields.InputDataFields.original_image, fields.DetectionResultFields.num_detections, fields.InputDataFields.num_groundtruth_boxes ] output_dict = { fields.InputDataFields.original_image: batched_output_dict[fields.InputDataFields.original_image] } for key in batched_output_dict: # remove the batch dimension. if key not in exclude_keys: output_dict[key] = tf.squeeze(batched_output_dict[key], 0) return output_dict def result_dict_for_batched_example(images, keys, detections, groundtruth=None, class_agnostic=False, scale_to_absolute=False, original_image_spatial_shapes=None, true_image_shapes=None, max_gt_boxes=None, label_id_offset=1): """Merges all detection and groundtruth information for a single example. Note that evaluation tools require classes that are 1-indexed, and so this function performs the offset. If `class_agnostic` is True, all output classes have label 1. The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary are normalized relative to the (potentially padded) input image, while the coordinates in 'detection' dictionary are normalized relative to the true image shape. Args: images: A single 4D uint8 image tensor of shape [batch_size, H, W, C]. keys: A [batch_size] string/int tensor with image identifier. detections: A dictionary of detections, returned from DetectionModel.postprocess(). groundtruth: (Optional) Dictionary of groundtruth items, with fields: 'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor of boxes, in normalized coordinates. 'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of 1-indexed classes. 'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of bbox area. (Optional) 'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64 tensor. (Optional) 'groundtruth_difficult': [batch_size, max_number_of_boxes] int64 tensor. (Optional) 'groundtruth_group_of': [batch_size, max_number_of_boxes] int64 tensor. (Optional) 'groundtruth_instance_masks': 4D int64 tensor of instance masks (Optional). 'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints, 2] float32 tensor with keypoints (Optional). 'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes, num_keypoints] bool tensor with keypoint visibilities (Optional). 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor of 1-indexed classes. (Optional) 'groundtruth_dp_num_points': [batch_size, max_number_of_boxes] int32 tensor. (Optional) 'groundtruth_dp_part_ids': [batch_size, max_number_of_boxes, max_sampled_points] int32 tensor. (Optional) 'groundtruth_dp_surface_coords_list': [batch_size, max_number_of_boxes, max_sampled_points, 4] float32 tensor. (Optional) class_agnostic: Boolean indicating whether the detections are class-agnostic (i.e. binary). Default False. scale_to_absolute: Boolean indicating whether boxes and keypoints should be scaled to absolute coordinates. Note that for IoU based evaluations, it does not matter whether boxes are expressed in absolute or relative coordinates. Default False. original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2] used to resize the image. 
When set to None, the image size is retained. true_image_shapes: A 2D int32 tensor of shape [batch_size, 3] containing the size of the unpadded original_image. max_gt_boxes: [batch_size] tensor representing the maximum number of groundtruth boxes to pad. label_id_offset: offset for class ids. Returns: A dictionary with: 'original_image': A [batch_size, H, W, C] uint8 image tensor. 'original_image_spatial_shape': A [batch_size, 2] tensor containing the original image sizes. 'true_image_shape': A [batch_size, 3] tensor containing the size of the unpadded original_image. 'key': A [batch_size] string tensor with image identifier. 'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes, in normalized or absolute coordinates, depending on the value of `scale_to_absolute`. 'detection_scores': [batch_size, max_detections] float32 tensor of scores. 'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed classes. 'detection_masks': [batch_size, max_detections, H, W] uint8 tensor of instance masks, reframed to full image masks. Note that these may be binarized (e.g. {0, 1}), or may contain 1-indexed part labels. (Optional) 'detection_keypoints': [batch_size, max_detections, num_keypoints, 2] float32 tensor containing keypoint coordinates. (Optional) 'detection_keypoint_scores': [batch_size, max_detections, num_keypoints] float32 tensor containing keypoint scores. (Optional) 'detection_surface_coords': [batch_size, max_detection, H, W, 2] float32 tensor with normalized surface coordinates (e.g. DensePose UV coordinates). (Optional) 'num_detections': [batch_size] int64 tensor containing number of valid detections. 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in normalized or absolute coordinates, depending on the value of `scale_to_absolute`. (Optional) 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed classes. (Optional) 'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox area. (Optional) 'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional) 'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional) 'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional) 'groundtruth_instance_masks': 4D int64 tensor of instance masks (Optional). 'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 tensor with keypoints (Optional). 'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints] bool tensor with keypoint visibilities (Optional). 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor of 1-indexed classes. (Optional) 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number of groundtruth boxes per image. Raises: ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape [2]. ValueError: if true_image_shapes is not 2D int32 tensor of shape [3]. 
""" input_data_fields = fields.InputDataFields if original_image_spatial_shapes is None: original_image_spatial_shapes = tf.tile( tf.expand_dims(tf.shape(images)[1:3], axis=0), multiples=[tf.shape(images)[0], 1]) else: if (len(original_image_spatial_shapes.shape) != 2 and original_image_spatial_shapes.shape[1] != 2): raise ValueError( '`original_image_spatial_shape` should be a 2D tensor of shape ' '[batch_size, 2].') if true_image_shapes is None: true_image_shapes = tf.tile( tf.expand_dims(tf.shape(images)[1:4], axis=0), multiples=[tf.shape(images)[0], 1]) else: if (len(true_image_shapes.shape) != 2 and true_image_shapes.shape[1] != 3): raise ValueError('`true_image_shapes` should be a 2D tensor of ' 'shape [batch_size, 3].') output_dict = { input_data_fields.original_image: images, input_data_fields.key: keys, input_data_fields.original_image_spatial_shape: ( original_image_spatial_shapes), input_data_fields.true_image_shape: true_image_shapes } detection_fields = fields.DetectionResultFields detection_boxes = detections[detection_fields.detection_boxes] detection_scores = detections[detection_fields.detection_scores] num_detections = tf.cast(detections[detection_fields.num_detections], dtype=tf.int32) if class_agnostic: detection_classes = tf.ones_like(detection_scores, dtype=tf.int64) else: detection_classes = ( tf.to_int64(detections[detection_fields.detection_classes]) + label_id_offset) if scale_to_absolute: output_dict[detection_fields.detection_boxes] = ( shape_utils.static_or_dynamic_map_fn( _scale_box_to_absolute, elems=[detection_boxes, original_image_spatial_shapes], dtype=tf.float32)) else: output_dict[detection_fields.detection_boxes] = detection_boxes output_dict[detection_fields.detection_classes] = detection_classes output_dict[detection_fields.detection_scores] = detection_scores output_dict[detection_fields.num_detections] = num_detections if detection_fields.detection_masks in detections: detection_masks = detections[detection_fields.detection_masks] output_dict[detection_fields.detection_masks] = resize_detection_masks( detection_boxes, detection_masks, original_image_spatial_shapes) if detection_fields.detection_surface_coords in detections: detection_surface_coords = detections[ detection_fields.detection_surface_coords] output_dict[detection_fields.detection_surface_coords] = ( shape_utils.static_or_dynamic_map_fn( _resize_surface_coordinate_masks, elems=[detection_boxes, detection_surface_coords, original_image_spatial_shapes], dtype=tf.float32)) if detection_fields.detection_keypoints in detections: detection_keypoints = detections[detection_fields.detection_keypoints] output_dict[detection_fields.detection_keypoints] = detection_keypoints if scale_to_absolute: output_dict[detection_fields.detection_keypoints] = ( shape_utils.static_or_dynamic_map_fn( _scale_keypoint_to_absolute, elems=[detection_keypoints, original_image_spatial_shapes], dtype=tf.float32)) if detection_fields.detection_keypoint_scores in detections: output_dict[detection_fields.detection_keypoint_scores] = detections[ detection_fields.detection_keypoint_scores] else: output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like( detections[detection_fields.detection_keypoints][:, :, :, 0]) if groundtruth: if max_gt_boxes is None: if input_data_fields.num_groundtruth_boxes in groundtruth: max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes] else: raise ValueError( 'max_gt_boxes must be provided when processing batched examples.') if 
input_data_fields.groundtruth_instance_masks in groundtruth: masks = groundtruth[input_data_fields.groundtruth_instance_masks] max_spatial_shape = tf.reduce_max( original_image_spatial_shapes, axis=0, keep_dims=True) tiled_max_spatial_shape = tf.tile( max_spatial_shape, multiples=[tf.shape(original_image_spatial_shapes)[0], 1]) groundtruth[input_data_fields.groundtruth_instance_masks] = ( shape_utils.static_or_dynamic_map_fn( _resize_groundtruth_masks, elems=[masks, true_image_shapes, original_image_spatial_shapes, tiled_max_spatial_shape], dtype=tf.uint8)) output_dict.update(groundtruth) image_shape = tf.cast(tf.shape(images), tf.float32) image_height, image_width = image_shape[1], image_shape[2] def _scale_box_to_normalized_true_image(args): """Scale the box coordinates to be relative to the true image shape.""" boxes, true_image_shape = args true_image_shape = tf.cast(true_image_shape, tf.float32) true_height, true_width = true_image_shape[0], true_image_shape[1] normalized_window = tf.stack([0.0, 0.0, true_height / image_height, true_width / image_width]) return box_list_ops.change_coordinate_frame( box_list.BoxList(boxes), normalized_window).get() groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes] groundtruth_boxes = shape_utils.static_or_dynamic_map_fn( _scale_box_to_normalized_true_image, elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32) output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes if input_data_fields.groundtruth_keypoints in groundtruth: # If groundtruth_keypoints is in the groundtruth dictionary. Update the # coordinates to conform with the true image shape. def _scale_keypoints_to_normalized_true_image(args): """Scale the box coordinates to be relative to the true image shape.""" keypoints, true_image_shape = args true_image_shape = tf.cast(true_image_shape, tf.float32) true_height, true_width = true_image_shape[0], true_image_shape[1] normalized_window = tf.stack( [0.0, 0.0, true_height / image_height, true_width / image_width]) return keypoint_ops.change_coordinate_frame(keypoints, normalized_window) groundtruth_keypoints = groundtruth[ input_data_fields.groundtruth_keypoints] groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn( _scale_keypoints_to_normalized_true_image, elems=[groundtruth_keypoints, true_image_shapes], dtype=tf.float32) output_dict[ input_data_fields.groundtruth_keypoints] = groundtruth_keypoints if scale_to_absolute: groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes] output_dict[input_data_fields.groundtruth_boxes] = ( shape_utils.static_or_dynamic_map_fn( _scale_box_to_absolute, elems=[groundtruth_boxes, original_image_spatial_shapes], dtype=tf.float32)) if input_data_fields.groundtruth_keypoints in groundtruth: groundtruth_keypoints = output_dict[ input_data_fields.groundtruth_keypoints] output_dict[input_data_fields.groundtruth_keypoints] = ( shape_utils.static_or_dynamic_map_fn( _scale_keypoint_to_absolute, elems=[groundtruth_keypoints, original_image_spatial_shapes], dtype=tf.float32)) # For class-agnostic models, groundtruth classes all become 1. 
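    # This mirrors the detection branch above, where detection_classes is set
    # to all ones when class_agnostic is True, so evaluators compare
    # groundtruth and detections under the same single foreground label.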
if class_agnostic: groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes] groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64) output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes return output_dict def get_evaluators(eval_config, categories, evaluator_options=None): """Returns the evaluator class according to eval_config, valid for categories. Args: eval_config: An `eval_pb2.EvalConfig`. categories: A list of dicts, each of which has the following keys - 'id': (required) an integer id uniquely identifying this category. 'name': (required) string representing category name e.g., 'cat', 'dog'. 'keypoints': (optional) dict mapping this category's keypoints to unique ids. evaluator_options: A dictionary of metric names (see EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization keyword arguments. For example: evalator_options = { 'coco_detection_metrics': {'include_metrics_per_category': True} } Returns: An list of instances of DetectionEvaluator. Raises: ValueError: if metric is not in the metric class dictionary. """ evaluator_options = evaluator_options or {} eval_metric_fn_keys = eval_config.metrics_set if not eval_metric_fn_keys: eval_metric_fn_keys = [EVAL_DEFAULT_METRIC] evaluators_list = [] for eval_metric_fn_key in eval_metric_fn_keys: if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT: raise ValueError('Metric not found: {}'.format(eval_metric_fn_key)) kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key in evaluator_options else {}) evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key]( categories, **kwargs_dict)) if isinstance(eval_config, eval_pb2.EvalConfig): parameterized_metrics = eval_config.parameterized_metric for parameterized_metric in parameterized_metrics: assert parameterized_metric.HasField('parameterized_metric') if parameterized_metric.WhichOneof( 'parameterized_metric') == EVAL_KEYPOINT_METRIC: keypoint_metrics = parameterized_metric.coco_keypoint_metrics # Create category to keypoints mapping dict. category_keypoints = {} class_label = keypoint_metrics.class_label category = None for cat in categories: if cat['name'] == class_label: category = cat break if not category: continue keypoints_for_this_class = category['keypoints'] category_keypoints = [{ 'id': keypoints_for_this_class[kp_name], 'name': kp_name } for kp_name in keypoints_for_this_class] # Create keypoint evaluator for this category. evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC]( category['id'], category_keypoints, class_label, keypoint_metrics.keypoint_label_to_sigmas)) return evaluators_list def get_eval_metric_ops_for_evaluators(eval_config, categories, eval_dict): """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`. Args: eval_config: An `eval_pb2.EvalConfig`. categories: A list of dicts, each of which has the following keys - 'id': (required) an integer id uniquely identifying this category. 'name': (required) string representing category name e.g., 'cat', 'dog'. eval_dict: An evaluation dictionary, returned from result_dict_for_single_example(). Returns: A dictionary of metric names to tuple of value_op and update_op that can be used as eval metric ops in tf.EstimatorSpec. 
""" eval_metric_ops = {} evaluator_options = evaluator_options_from_eval_config(eval_config) evaluators_list = get_evaluators(eval_config, categories, evaluator_options) for evaluator in evaluators_list: eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops( eval_dict)) return eval_metric_ops def evaluator_options_from_eval_config(eval_config): """Produces a dictionary of evaluation options for each eval metric. Args: eval_config: An `eval_pb2.EvalConfig`. Returns: evaluator_options: A dictionary of metric names (see EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization keyword arguments. For example: evalator_options = { 'coco_detection_metrics': {'include_metrics_per_category': True} } """ eval_metric_fn_keys = eval_config.metrics_set evaluator_options = {} for eval_metric_fn_key in eval_metric_fn_keys: if eval_metric_fn_key in ( 'coco_detection_metrics', 'coco_mask_metrics', 'lvis_mask_metrics'): evaluator_options[eval_metric_fn_key] = { 'include_metrics_per_category': ( eval_config.include_metrics_per_category) } if (hasattr(eval_config, 'all_metrics_per_category') and eval_config.all_metrics_per_category): evaluator_options[eval_metric_fn_key].update({ 'all_metrics_per_category': eval_config.all_metrics_per_category }) # For coco detection eval, if the eval_config proto contains the # "skip_predictions_for_unlabeled_class" field, include this field in # evaluator_options. if eval_metric_fn_key == 'coco_detection_metrics' and hasattr( eval_config, 'skip_predictions_for_unlabeled_class'): evaluator_options[eval_metric_fn_key].update({ 'skip_predictions_for_unlabeled_class': (eval_config.skip_predictions_for_unlabeled_class) }) for super_category in eval_config.super_categories: if 'super_categories' not in evaluator_options[eval_metric_fn_key]: evaluator_options[eval_metric_fn_key]['super_categories'] = {} key = super_category value = eval_config.super_categories[key].split(',') evaluator_options[eval_metric_fn_key]['super_categories'][key] = value if eval_metric_fn_key == 'lvis_mask_metrics' and hasattr( eval_config, 'export_path'): evaluator_options[eval_metric_fn_key].update({ 'export_path': eval_config.export_path }) elif eval_metric_fn_key == 'precision_at_recall_detection_metrics': evaluator_options[eval_metric_fn_key] = { 'recall_lower_bound': (eval_config.recall_lower_bound), 'recall_upper_bound': (eval_config.recall_upper_bound), 'skip_predictions_for_unlabeled_class': eval_config.skip_predictions_for_unlabeled_class, } return evaluator_options def has_densepose(eval_dict): return (fields.DetectionResultFields.detection_masks in eval_dict and fields.DetectionResultFields.detection_surface_coords in eval_dict)
55,742
44.616203
80
py
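The eval_util excerpt above wires evaluator construction through two helpers: evaluator_options_from_eval_config turns the EvalConfig proto into per-metric keyword arguments, and get_evaluators instantiates the matching DetectionEvaluator classes. Below is a minimal usage sketch, not part of the dumped file; it assumes the excerpt is importable as object_detection.eval_util and that the COCO metric dependencies are installed, and the two-class categories list is invented for illustration.

# Minimal sketch (editorial example, assumptions noted above).
from object_detection import eval_util          # assumed module path for the excerpt
from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.append('coco_detection_metrics')
eval_config.include_metrics_per_category = True

# Made-up two-class category list in the format the docstring describes.
categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]

# Per-metric keyword arguments derived from the config, then the evaluators.
evaluator_options = eval_util.evaluator_options_from_eval_config(eval_config)
evaluators = eval_util.get_evaluators(eval_config, categories, evaluator_options)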
models
models-master/research/object_detection/exporter_lib_tf2_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for exporter_lib_v2.py.""" from __future__ import division import io import os import unittest from absl.testing import parameterized import numpy as np from PIL import Image import six import tensorflow.compat.v2 as tf from object_detection import exporter_lib_v2 from object_detection.builders import model_builder from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.protos import pipeline_pb2 from object_detection.utils import dataset_util from object_detection.utils import tf_version if six.PY2: import mock # pylint: disable=g-importing-member,g-import-not-at-top else: from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top class FakeModel(model.DetectionModel): def __init__(self, conv_weight_scalar=1.0): super(FakeModel, self).__init__(num_classes=2) self._conv = tf.keras.layers.Conv2D( filters=1, kernel_size=1, strides=(1, 1), padding='valid', kernel_initializer=tf.keras.initializers.Constant( value=conv_weight_scalar)) def preprocess(self, inputs): return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs) def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): return_dict = {'image': self._conv(preprocessed_inputs)} if 'side_inp_1' in side_inputs: return_dict['image'] += side_inputs['side_inp_1'] return return_dict def postprocess(self, prediction_dict, true_image_shapes): predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) with tf.control_dependencies(list(prediction_dict.values())): postprocessed_tensors = { 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]], tf.float32), 'detection_scores': predict_tensor_sum + tf.constant( [[0.7, 0.6], [0.9, 0.0]], tf.float32), 'detection_classes': tf.constant([[0, 1], [1, 0]], tf.float32), 'num_detections': tf.constant([2, 1], tf.float32), } return postprocessed_tensors def predict_masks_from_boxes(self, prediction_dict, true_image_shapes, boxes): output_dict = self.postprocess(prediction_dict, true_image_shapes) output_dict.update({ 'detection_masks': tf.ones(shape=(1, 2, 16), dtype=tf.float32), }) return output_dict def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): pass def restore_from_objects(self, fine_tune_checkpoint_type): pass def loss(self, prediction_dict, true_image_shapes): pass def regularization_losses(self): pass def updates(self): pass @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): def _save_checkpoint_from_mock_model( self, checkpoint_dir, conv_weight_scalar=6.0): mock_model = FakeModel(conv_weight_scalar) fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) preprocessed_inputs, true_image_shapes = 
mock_model.preprocess(fake_image) predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) mock_model.postprocess(predictions, true_image_shapes) ckpt = tf.train.Checkpoint(model=mock_model) exported_checkpoint_manager = tf.train.CheckpointManager( ckpt, checkpoint_dir, max_to_keep=1) exported_checkpoint_manager.save(checkpoint_number=0) @parameterized.parameters( {'input_type': 'image_tensor'}, {'input_type': 'encoded_image_string_tensor'}, {'input_type': 'tf_example'}, ) def test_export_yields_correct_directory_structure( self, input_type='image_tensor'): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter_lib_v2.export_inference_graph( input_type=input_type, pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'variables', 'variables.index'))) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'variables', 'variables.data-00000-of-00001'))) self.assertTrue(os.path.exists(os.path.join( output_directory, 'checkpoint', 'ckpt-0.index'))) self.assertTrue(os.path.exists(os.path.join( output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001'))) self.assertTrue(os.path.exists(os.path.join( output_directory, 'pipeline.config'))) def get_dummy_input(self, input_type): """Get dummy input for the given input type.""" if input_type == 'image_tensor': return np.zeros((1, 20, 20, 3), dtype=np.uint8) if input_type == 'float_image_tensor': return np.zeros((1, 20, 20, 3), dtype=np.float32) elif input_type == 'encoded_image_string_tensor': image = Image.new('RGB', (20, 20)) byte_io = io.BytesIO() image.save(byte_io, 'PNG') return [byte_io.getvalue()] elif input_type == 'tf_example': image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8) encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy() example = tf.train.Example( features=tf.train.Features( feature={ 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), 'image/format': dataset_util.bytes_feature(six.b('jpeg')), 'image/source_id': dataset_util.bytes_feature(six.b('image_id')), })).SerializeToString() return [example] @parameterized.parameters( {'input_type': 'image_tensor'}, {'input_type': 'encoded_image_string_tensor'}, {'input_type': 'tf_example'}, {'input_type': 'float_image_tensor'}, ) def test_export_saved_model_and_run_inference( self, input_type='image_tensor'): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter_lib_v2.export_inference_graph( input_type=input_type, pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory) saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) image = 
self.get_dummy_input(input_type) detections = detect_fn(tf.constant(image)) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(detections[detection_fields.detection_scores], [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(detections[detection_fields.detection_classes], [[1, 2], [2, 1]]) self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) @parameterized.parameters( {'use_default_serving': True}, {'use_default_serving': False} ) def test_export_saved_model_and_run_inference_with_side_inputs( self, input_type='image_tensor', use_default_serving=True): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter_lib_v2.export_inference_graph( input_type=input_type, pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory, use_side_inputs=True, side_input_shapes='1/2,2', side_input_names='side_inp_1,side_inp_2', side_input_types='tf.float32,tf.uint8') saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) detect_fn_sig = detect_fn.signatures['serving_default'] image = tf.constant(self.get_dummy_input(input_type)) side_input_1 = np.ones((1,), dtype=np.float32) side_input_2 = np.ones((2, 2), dtype=np.uint8) if use_default_serving: detections = detect_fn_sig(input_tensor=image, side_inp_1=tf.constant(side_input_1), side_inp_2=tf.constant(side_input_2)) else: detections = detect_fn(image, tf.constant(side_input_1), tf.constant(side_input_2)) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(detections[detection_fields.detection_scores], [[400.7, 400.6], [400.9, 400.0]]) self.assertAllClose(detections[detection_fields.detection_classes], [[1, 2], [2, 1]]) self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) def test_export_checkpoint_and_run_inference_with_image(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter_lib_v2.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory) mock_model = FakeModel() ckpt = tf.compat.v2.train.Checkpoint( model=mock_model) checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint') manager = tf.compat.v2.train.CheckpointManager( ckpt, checkpoint_dir, max_to_keep=7) ckpt.restore(manager.latest_checkpoint).expect_partial() fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32) preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) predictions = mock_model.predict(preprocessed_inputs, 
true_image_shapes) detections = mock_model.postprocess(predictions, true_image_shapes) # 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3. self.assertAllClose(detections['detection_scores'], [[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]]) class DetectionFromImageAndBoxModuleTest(tf.test.TestCase): def get_dummy_input(self, input_type): """Get dummy input for the given input type.""" if input_type == 'image_tensor' or input_type == 'image_and_boxes_tensor': return np.zeros((1, 20, 20, 3), dtype=np.uint8) if input_type == 'float_image_tensor': return np.zeros((1, 20, 20, 3), dtype=np.float32) elif input_type == 'encoded_image_string_tensor': image = Image.new('RGB', (20, 20)) byte_io = io.BytesIO() image.save(byte_io, 'PNG') return [byte_io.getvalue()] elif input_type == 'tf_example': image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8) encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy() example = tf.train.Example( features=tf.train.Features( feature={ 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), 'image/format': dataset_util.bytes_feature(six.b('jpeg')), 'image/source_id': dataset_util.bytes_feature(six.b('image_id')), })).SerializeToString() return [example] def _save_checkpoint_from_mock_model(self, checkpoint_dir, conv_weight_scalar=6.0): mock_model = FakeModel(conv_weight_scalar) fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) mock_model.postprocess(predictions, true_image_shapes) ckpt = tf.train.Checkpoint(model=mock_model) exported_checkpoint_manager = tf.train.CheckpointManager( ckpt, checkpoint_dir, max_to_keep=1) exported_checkpoint_manager.save(checkpoint_number=0) def test_export_saved_model_and_run_inference_for_segmentation( self, input_type='image_and_boxes_tensor'): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter_lib_v2.export_inference_graph( input_type=input_type, pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory) saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) image = self.get_dummy_input(input_type) boxes = tf.constant([ [ [0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8], ], ]) detections = detect_fn(tf.constant(image), boxes) detection_fields = fields.DetectionResultFields self.assertIn(detection_fields.detection_masks, detections) self.assertListEqual( list(detections[detection_fields.detection_masks].shape), [1, 2, 16]) if __name__ == '__main__': tf.enable_v2_behavior() tf.test.main()
16,191
41.610526
85
py
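To make the flow that the test above exercises concrete, here is a short inference sketch (not part of the test file; the SavedModel path and the dummy image are placeholders): load a model exported by exporter_lib_v2.export_inference_graph and run it on a single uint8 image, reading the same detection fields the assertions check.

# Editorial sketch; placeholder path below.
import numpy as np
import tensorflow.compat.v2 as tf
from object_detection.core import standard_fields as fields

detect_fn = tf.saved_model.load('/path/to/output_directory/saved_model')
image = np.zeros((1, 20, 20, 3), dtype=np.uint8)  # batch of one dummy image
detections = detect_fn(tf.constant(image))

boxes = detections[fields.DetectionResultFields.detection_boxes]
scores = detections[fields.DetectionResultFields.detection_scores]
classes = detections[fields.DetectionResultFields.detection_classes]  # labels are 1-based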
models
models-master/research/object_detection/exporter_lib_v2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions to export object detection inference graph.""" import ast import os import tensorflow.compat.v2 as tf from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.utils import config_util INPUT_BUILDER_UTIL_MAP = { 'model_build': model_builder.build, } def _decode_image(encoded_image_string_tensor): image_tensor = tf.image.decode_image(encoded_image_string_tensor, channels=3) image_tensor.set_shape((None, None, 3)) return image_tensor def _decode_tf_example(tf_example_string_tensor): tensor_dict = tf_example_decoder.TfExampleDecoder().decode( tf_example_string_tensor) image_tensor = tensor_dict[fields.InputDataFields.image] return image_tensor def _combine_side_inputs(side_input_shapes='', side_input_types='', side_input_names=''): """Zips the side inputs together. Args: side_input_shapes: forward-slash-separated list of comma-separated lists describing input shapes. side_input_types: comma-separated list of the types of the inputs. side_input_names: comma-separated list of the names of the inputs. Returns: a zipped list of side input tuples. """ side_input_shapes = [ ast.literal_eval('[' + x + ']') for x in side_input_shapes.split('/') ] side_input_types = eval('[' + side_input_types + ']') # pylint: disable=eval-used side_input_names = side_input_names.split(',') return zip(side_input_shapes, side_input_types, side_input_names) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" def __init__(self, detection_model, use_side_inputs=False, zipped_side_inputs=None): """Initializes a module for detection. Args: detection_model: the detection model to use for inference. use_side_inputs: whether to use side inputs. zipped_side_inputs: the zipped side inputs. """ self._model = detection_model def _get_side_input_signature(self, zipped_side_inputs): sig = [] side_input_names = [] for info in zipped_side_inputs: sig.append(tf.TensorSpec(shape=info[0], dtype=info[1], name=info[2])) side_input_names.append(info[2]) return sig def _get_side_names_from_zip(self, zipped_side_inputs): return [side[2] for side in zipped_side_inputs] def _preprocess_input(self, batch_input, decode_fn): # Input preprocessing happends on the CPU. We don't need to use the device # placement as it is automatically handled by TF. 
def _decode_and_preprocess(single_input): image = decode_fn(single_input) image = tf.cast(image, tf.float32) image, true_shape = self._model.preprocess(image[tf.newaxis, :, :, :]) return image[0], true_shape[0] images, true_shapes = tf.map_fn( _decode_and_preprocess, elems=batch_input, parallel_iterations=32, back_prop=False, fn_output_signature=(tf.float32, tf.int32)) return images, true_shapes def _run_inference_on_images(self, images, true_shapes, **kwargs): """Cast image to float and run inference. Args: images: float32 Tensor of shape [None, None, None, 3]. true_shapes: int32 Tensor of form [batch, 3] **kwargs: additional keyword arguments. Returns: Tensor dictionary holding detections. """ label_id_offset = 1 prediction_dict = self._model.predict(images, true_shapes, **kwargs) detections = self._model.postprocess(prediction_dict, true_shapes) classes_field = fields.DetectionResultFields.detection_classes detections[classes_field] = ( tf.cast(detections[classes_field], tf.float32) + label_id_offset) for key, val in detections.items(): detections[key] = tf.cast(val, tf.float32) return detections class DetectionFromImageModule(DetectionInferenceModule): """Detection Inference Module for image inputs.""" def __init__(self, detection_model, use_side_inputs=False, zipped_side_inputs=None): """Initializes a module for detection. Args: detection_model: the detection model to use for inference. use_side_inputs: whether to use side inputs. zipped_side_inputs: the zipped side inputs. """ if zipped_side_inputs is None: zipped_side_inputs = [] sig = [tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8, name='input_tensor')] if use_side_inputs: sig.extend(self._get_side_input_signature(zipped_side_inputs)) self._side_input_names = self._get_side_names_from_zip(zipped_side_inputs) def call_func(input_tensor, *side_inputs): kwargs = dict(zip(self._side_input_names, side_inputs)) images, true_shapes = self._preprocess_input(input_tensor, lambda x: x) return self._run_inference_on_images(images, true_shapes, **kwargs) self.__call__ = tf.function(call_func, input_signature=sig) # TODO(kaushikshiv): Check if omitting the signature also works. 
super(DetectionFromImageModule, self).__init__(detection_model, use_side_inputs, zipped_side_inputs) def get_true_shapes(input_tensor): input_shape = tf.shape(input_tensor) batch = input_shape[0] image_shape = input_shape[1:] true_shapes = tf.tile(image_shape[tf.newaxis, :], [batch, 1]) return true_shapes class DetectionFromFloatImageModule(DetectionInferenceModule): """Detection Inference Module for float image inputs.""" @tf.function( input_signature=[ tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.float32)]) def __call__(self, input_tensor): images, true_shapes = self._preprocess_input(input_tensor, lambda x: x) return self._run_inference_on_images(images, true_shapes) class DetectionFromEncodedImageModule(DetectionInferenceModule): """Detection Inference Module for encoded image string inputs.""" @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)]) def __call__(self, input_tensor): images, true_shapes = self._preprocess_input(input_tensor, _decode_image) return self._run_inference_on_images(images, true_shapes) class DetectionFromTFExampleModule(DetectionInferenceModule): """Detection Inference Module for TF.Example inputs.""" @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)]) def __call__(self, input_tensor): images, true_shapes = self._preprocess_input(input_tensor, _decode_tf_example) return self._run_inference_on_images(images, true_shapes) def export_inference_graph(input_type, pipeline_config, trained_checkpoint_dir, output_directory, use_side_inputs=False, side_input_shapes='', side_input_types='', side_input_names=''): """Exports inference graph for the model specified in the pipeline config. This function creates `output_directory` if it does not already exist, which will hold a copy of the pipeline config with filename `pipeline.config`, and two subdirectories named `checkpoint` and `saved_model` (containing the exported checkpoint and SavedModel respectively). Args: input_type: Type of input for the graph. Can be one of ['image_tensor', 'encoded_image_string_tensor', 'tf_example']. pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. trained_checkpoint_dir: Path to the trained checkpoint file. output_directory: Path to write outputs. use_side_inputs: boolean that determines whether side inputs should be included in the input signature. side_input_shapes: forward-slash-separated list of comma-separated lists describing input shapes. side_input_types: comma-separated list of the types of the inputs. side_input_names: comma-separated list of the names of the inputs. Raises: ValueError: if input_type is invalid. 
""" output_checkpoint_directory = os.path.join(output_directory, 'checkpoint') output_saved_model_directory = os.path.join(output_directory, 'saved_model') detection_model = INPUT_BUILDER_UTIL_MAP['model_build']( pipeline_config.model, is_training=False) ckpt = tf.train.Checkpoint( model=detection_model) manager = tf.train.CheckpointManager( ckpt, trained_checkpoint_dir, max_to_keep=1) status = ckpt.restore(manager.latest_checkpoint).expect_partial() if input_type not in DETECTION_MODULE_MAP: raise ValueError('Unrecognized `input_type`') if use_side_inputs and input_type != 'image_tensor': raise ValueError('Side inputs supported for image_tensor input type only.') zipped_side_inputs = [] if use_side_inputs: zipped_side_inputs = _combine_side_inputs(side_input_shapes, side_input_types, side_input_names) detection_module = DETECTION_MODULE_MAP[input_type](detection_model, use_side_inputs, list(zipped_side_inputs)) # Getting the concrete function traces the graph and forces variables to # be constructed --- only after this can we save the checkpoint and # saved model. concrete_function = detection_module.__call__.get_concrete_function() status.assert_existing_objects_matched() exported_checkpoint_manager = tf.train.CheckpointManager( ckpt, output_checkpoint_directory, max_to_keep=1) exported_checkpoint_manager.save(checkpoint_number=0) tf.saved_model.save(detection_module, output_saved_model_directory, signatures=concrete_function) config_util.save_pipeline_config(pipeline_config, output_directory) class DetectionFromImageAndBoxModule(DetectionInferenceModule): """Detection Inference Module for image with bounding box inputs. The saved model will require two inputs (image and normalized boxes) and run per-box mask prediction. To be compatible with this exporter, the detection model has to implement a called predict_masks_from_boxes( prediction_dict, true_image_shapes, provided_boxes, **params), where - prediciton_dict is a dict returned by the predict method. - true_image_shapes is a tensor of size [batch_size, 3], containing the true shape of each image in case it is padded. - provided_boxes is a [batch_size, num_boxes, 4] size tensor containing boxes specified in normalized coordinates. """ def __init__(self, detection_model, use_side_inputs=False, zipped_side_inputs=None): """Initializes a module for detection. Args: detection_model: the detection model to use for inference. use_side_inputs: whether to use side inputs. zipped_side_inputs: the zipped side inputs. """ assert hasattr(detection_model, 'predict_masks_from_boxes') super(DetectionFromImageAndBoxModule, self).__init__(detection_model, use_side_inputs, zipped_side_inputs) def _run_segmentation_on_images(self, image, boxes, **kwargs): """Run segmentation on images with provided boxes. Args: image: uint8 Tensor of shape [1, None, None, 3]. boxes: float32 tensor of shape [1, None, 4] containing normalized box coordinates. **kwargs: additional keyword arguments. Returns: Tensor dictionary holding detections (including masks). 
""" label_id_offset = 1 image = tf.cast(image, tf.float32) image, shapes = self._model.preprocess(image) prediction_dict = self._model.predict(image, shapes, **kwargs) detections = self._model.predict_masks_from_boxes(prediction_dict, shapes, boxes) classes_field = fields.DetectionResultFields.detection_classes detections[classes_field] = ( tf.cast(detections[classes_field], tf.float32) + label_id_offset) for key, val in detections.items(): detections[key] = tf.cast(val, tf.float32) return detections @tf.function(input_signature=[ tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8), tf.TensorSpec(shape=[1, None, 4], dtype=tf.float32) ]) def __call__(self, input_tensor, boxes): return self._run_segmentation_on_images(input_tensor, boxes) DETECTION_MODULE_MAP = { 'image_tensor': DetectionFromImageModule, 'encoded_image_string_tensor': DetectionFromEncodedImageModule, 'tf_example': DetectionFromTFExampleModule, 'float_image_tensor': DetectionFromFloatImageModule, 'image_and_boxes_tensor': DetectionFromImageAndBoxModule, }
13,886
37.89916
84
py
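As a usage note for the exporter module above, the sketch below (editorial example; all paths are placeholders) parses a pipeline config from its text proto and exports the latest checkpoint of a training run with the 'image_tensor' input signature, producing the checkpoint/, saved_model/ and pipeline.config outputs described in the export_inference_graph docstring.

# Minimal sketch of driving export_inference_graph outside the test harness.
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
from object_detection import exporter_lib_v2
from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile('/path/to/pipeline.config', 'r') as f:  # placeholder path
  text_format.Parse(f.read(), pipeline_config)

exporter_lib_v2.export_inference_graph(
    input_type='image_tensor',
    pipeline_config=pipeline_config,
    trained_checkpoint_dir='/path/to/training_dir',   # must contain a checkpoint
    output_directory='/path/to/exported_model')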
models
models-master/research/object_detection/model_main.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Binary to run train and evaluation on object detection model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import flags import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from object_detection import model_lib flags.DEFINE_string( 'model_dir', None, 'Path to output model directory ' 'where event and checkpoint files will be written.') flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' 'file.') flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') flags.DEFINE_boolean('eval_training_data', False, 'If training data should be evaluated for this job. Note ' 'that one call only use this in eval-only mode, and ' '`checkpoint_dir` must be supplied.') flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' 'every n eval input examples, where n is provided.') flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' 'one of every n train input examples for evaluation, ' 'where n is provided. This is only used if ' '`eval_training_data` is True.') flags.DEFINE_string( 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If ' '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' 'writing resulting metrics to `model_dir`.') flags.DEFINE_boolean( 'run_once', False, 'If running in eval-only mode, whether to run just ' 'one round of eval vs running continuously (default).' ) flags.DEFINE_integer( 'max_eval_retries', 0, 'If running continuous eval, the maximum number of ' 'retries upon encountering tf.errors.InvalidArgumentError. If negative, ' 'will always retry the evaluation.' ) FLAGS = flags.FLAGS def main(unused_argv): flags.mark_flag_as_required('model_dir') flags.mark_flag_as_required('pipeline_config_path') config = tf_estimator.RunConfig(model_dir=FLAGS.model_dir) train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config=config, pipeline_config_path=FLAGS.pipeline_config_path, train_steps=FLAGS.num_train_steps, sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, sample_1_of_n_eval_on_train_examples=( FLAGS.sample_1_of_n_eval_on_train_examples)) estimator = train_and_eval_dict['estimator'] train_input_fn = train_and_eval_dict['train_input_fn'] eval_input_fns = train_and_eval_dict['eval_input_fns'] eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] predict_input_fn = train_and_eval_dict['predict_input_fn'] train_steps = train_and_eval_dict['train_steps'] if FLAGS.checkpoint_dir: if FLAGS.eval_training_data: name = 'training_data' input_fn = eval_on_train_input_fn else: name = 'validation_data' # The first eval input will be evaluated. 
input_fn = eval_input_fns[0] if FLAGS.run_once: estimator.evaluate(input_fn, steps=None, checkpoint_path=tf.train.latest_checkpoint( FLAGS.checkpoint_dir)) else: model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn, train_steps, name, FLAGS.max_eval_retries) else: train_spec, eval_specs = model_lib.create_train_and_eval_specs( train_input_fn, eval_input_fns, eval_on_train_input_fn, predict_input_fn, train_steps, eval_on_train_data=False) # Currently only a single Eval Spec is allowed. tf_estimator.train_and_evaluate(estimator, train_spec, eval_specs[0]) if __name__ == '__main__': tf.app.run()
4,573
40.581818
80
py
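The binary above switches into an eval-only mode when --checkpoint_dir is supplied. Below is a hedged sketch of that path driven directly through model_lib rather than the flags (editorial example; all paths are placeholders and the sampling arguments mirror the flag defaults), evaluating the latest checkpoint once as --run_once does.

# Sketch of the eval-only branch, assuming placeholder paths.
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from object_detection import model_lib

run_config = tf_estimator.RunConfig(model_dir='/path/to/eval_output')
train_and_eval_dict = model_lib.create_estimator_and_inputs(
    run_config=run_config,
    pipeline_config_path='/path/to/pipeline.config',
    train_steps=None,
    sample_1_of_n_eval_examples=1,
    sample_1_of_n_eval_on_train_examples=5)
estimator = train_and_eval_dict['estimator']
eval_input_fn = train_and_eval_dict['eval_input_fns'][0]  # first eval input, as in main()

# One evaluation pass over the latest checkpoint, as with --run_once.
estimator.evaluate(
    eval_input_fn,
    steps=None,
    checkpoint_path=tf.train.latest_checkpoint('/path/to/train_dir'))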
models
models-master/research/object_detection/inputs_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.tflearn.inputs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import unittest from absl import logging from absl.testing import parameterized import numpy as np import six import tensorflow.compat.v1 as tf from object_detection import inputs from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.utils import config_util from object_detection.utils import test_case from object_detection.utils import test_utils from object_detection.utils import tf_version if six.PY2: import mock # pylint: disable=g-import-not-at-top else: from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member FLAGS = tf.flags.FLAGS def _get_configs_for_model(model_name): """Returns configurations for model.""" fname = os.path.join(tf.resource_loader.get_data_files_path(), 'samples/configs/' + model_name + '.config') label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), 'data/pet_label_map.pbtxt') data_path = os.path.join(tf.resource_loader.get_data_files_path(), 'test_data/pets_examples.record') configs = config_util.get_configs_from_pipeline_file(fname) override_dict = { 'train_input_path': data_path, 'eval_input_path': data_path, 'label_map_path': label_map_path } return config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) def _get_configs_for_model_sequence_example(model_name, frame_index=-1): """Returns configurations for model.""" fname = os.path.join(tf.resource_loader.get_data_files_path(), 'test_data/' + model_name + '.config') label_map_path = os.path.join(tf.resource_loader.get_data_files_path(), 'data/snapshot_serengeti_label_map.pbtxt') data_path = os.path.join( tf.resource_loader.get_data_files_path(), 'test_data/snapshot_serengeti_sequence_examples.record') configs = config_util.get_configs_from_pipeline_file(fname) override_dict = { 'train_input_path': data_path, 'eval_input_path': data_path, 'label_map_path': label_map_path, 'frame_index': frame_index } return config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) def _make_initializable_iterator(dataset): """Creates an iterator, and initializes tables. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.data.Iterator`. 
""" iterator = tf.data.make_initializable_iterator(dataset) tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) return iterator @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.') class InputFnTest(test_case.TestCase, parameterized.TestCase): def test_faster_rcnn_resnet50_train_input(self): """Tests the training input function for FasterRcnnResnet50.""" configs = _get_configs_for_model('faster_rcnn_resnet50_pets') model_config = configs['model'] model_config.faster_rcnn.num_classes = 37 train_input_fn = inputs.create_train_input_fn( configs['train_config'], configs['train_input_config'], model_config) features, labels = _make_initializable_iterator(train_input_fn()).get_next() self.assertAllEqual([1, None, None, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [1, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [1, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [1, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [1, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_confidences].dtype) def test_faster_rcnn_resnet50_train_input_with_additional_channels(self): """Tests the training input function for FasterRcnnResnet50.""" configs = _get_configs_for_model('faster_rcnn_resnet50_pets') model_config = configs['model'] configs['train_input_config'].num_additional_channels = 2 configs['train_config'].retain_original_images = True model_config.faster_rcnn.num_classes = 37 train_input_fn = inputs.create_train_input_fn( configs['train_config'], configs['train_input_config'], model_config) features, labels = _make_initializable_iterator(train_input_fn()).get_next() self.assertAllEqual([1, None, None, 5], features[fields.InputDataFields.image].shape.as_list()) self.assertAllEqual( [1, None, None, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [1, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [1, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [1, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [1, 100, model_config.faster_rcnn.num_classes], 
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_confidences].dtype) @parameterized.parameters( {'eval_batch_size': 1}, {'eval_batch_size': 8} ) def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1): """Tests the eval input function for FasterRcnnResnet50.""" configs = _get_configs_for_model('faster_rcnn_resnet50_pets') model_config = configs['model'] model_config.faster_rcnn.num_classes = 37 eval_config = configs['eval_config'] eval_config.batch_size = eval_batch_size eval_input_fn = inputs.create_eval_input_fn( eval_config, configs['eval_input_configs'][0], model_config) features, labels = _make_initializable_iterator(eval_input_fn()).get_next() self.assertAllEqual([eval_batch_size, None, None, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual( [eval_batch_size, None, None, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.uint8, features[fields.InputDataFields.original_image].dtype) self.assertAllEqual([eval_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [eval_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [eval_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_area].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_area].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) self.assertEqual( tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) self.assertEqual( tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) def test_context_rcnn_resnet50_train_input_with_sequence_example( self, train_batch_size=8): """Tests the training input function for FasterRcnnResnet50.""" configs = _get_configs_for_model_sequence_example( 'context_rcnn_camera_trap') model_config = configs['model'] train_config = configs['train_config'] train_config.batch_size = train_batch_size train_input_fn = inputs.create_train_input_fn( train_config, configs['train_input_config'], model_config) features, labels = _make_initializable_iterator(train_input_fn()).get_next() self.assertAllEqual([train_batch_size, 640, 640, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual([train_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [train_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, 
labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [train_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [train_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [train_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_confidences].dtype) def test_context_rcnn_resnet50_eval_input_with_sequence_example( self, eval_batch_size=8): """Tests the eval input function for FasterRcnnResnet50.""" configs = _get_configs_for_model_sequence_example( 'context_rcnn_camera_trap') model_config = configs['model'] eval_config = configs['eval_config'] eval_config.batch_size = eval_batch_size eval_input_fn = inputs.create_eval_input_fn( eval_config, configs['eval_input_configs'][0], model_config) features, labels = _make_initializable_iterator(eval_input_fn()).get_next() self.assertAllEqual([eval_batch_size, 640, 640, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual( [eval_batch_size, 640, 640, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.uint8, features[fields.InputDataFields.original_image].dtype) self.assertAllEqual([eval_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [eval_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [eval_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) def test_context_rcnn_resnet50_eval_input_with_sequence_example_image_id_list( self, eval_batch_size=8): """Tests the eval input function for FasterRcnnResnet50.""" configs = _get_configs_for_model_sequence_example( 'context_rcnn_camera_trap') model_config = configs['model'] eval_config = configs['eval_config'] eval_config.batch_size = eval_batch_size eval_input_config = configs['eval_input_configs'][0] eval_input_config.load_context_image_ids = True eval_input_fn = inputs.create_eval_input_fn( eval_config, eval_input_config, model_config) features, labels = _make_initializable_iterator(eval_input_fn()).get_next() self.assertAllEqual([eval_batch_size, 640, 640, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual( [eval_batch_size, 640, 640, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.uint8, features[fields.InputDataFields.original_image].dtype) self.assertAllEqual([eval_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, 
features[inputs.HASH_KEY].dtype) self.assertAllEqual( [eval_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [eval_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) def test_context_rcnn_resnet50_train_input_with_sequence_example_frame_index( self, train_batch_size=8): """Tests the training input function for FasterRcnnResnet50.""" configs = _get_configs_for_model_sequence_example( 'context_rcnn_camera_trap', frame_index=2) model_config = configs['model'] train_config = configs['train_config'] train_config.batch_size = train_batch_size train_input_fn = inputs.create_train_input_fn( train_config, configs['train_input_config'], model_config) features, labels = _make_initializable_iterator(train_input_fn()).get_next() self.assertAllEqual([train_batch_size, 640, 640, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual([train_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [train_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [train_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [train_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [train_batch_size, 100, model_config.faster_rcnn.num_classes], labels[fields.InputDataFields.groundtruth_confidences].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_confidences].dtype) def test_ssd_inceptionV2_train_input(self): """Tests the training input function for SSDInceptionV2.""" configs = _get_configs_for_model('ssd_inception_v2_pets') model_config = configs['model'] model_config.ssd.num_classes = 37 batch_size = configs['train_config'].batch_size train_input_fn = inputs.create_train_input_fn( configs['train_config'], configs['train_input_config'], model_config) features, labels = _make_initializable_iterator(train_input_fn()).get_next() self.assertAllEqual([batch_size, 300, 300, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual([batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [batch_size], labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list()) self.assertEqual(tf.int32, labels[fields.InputDataFields.num_groundtruth_boxes].dtype) self.assertAllEqual( [batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) 
self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [batch_size, 100, model_config.ssd.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [batch_size, 100], labels[ fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) @parameterized.parameters( {'eval_batch_size': 1}, {'eval_batch_size': 8} ) def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1): """Tests the eval input function for SSDInceptionV2.""" configs = _get_configs_for_model('ssd_inception_v2_pets') model_config = configs['model'] model_config.ssd.num_classes = 37 eval_config = configs['eval_config'] eval_config.batch_size = eval_batch_size eval_input_fn = inputs.create_eval_input_fn( eval_config, configs['eval_input_configs'][0], model_config) features, labels = _make_initializable_iterator(eval_input_fn()).get_next() self.assertAllEqual([eval_batch_size, 300, 300, 3], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual( [eval_batch_size, 300, 300, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.uint8, features[fields.InputDataFields.original_image].dtype) self.assertAllEqual([eval_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [eval_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [eval_batch_size, 100, model_config.ssd.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[ fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual( tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_area].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_area].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) self.assertEqual( tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) self.assertEqual( tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) def test_ssd_inceptionV2_eval_input_with_additional_channels( self, eval_batch_size=1): """Tests the eval input function for SSDInceptionV2 with additional channel. Args: eval_batch_size: Batch size for eval set. 
""" configs = _get_configs_for_model('ssd_inception_v2_pets') model_config = configs['model'] model_config.ssd.num_classes = 37 configs['eval_input_configs'][0].num_additional_channels = 1 eval_config = configs['eval_config'] eval_config.batch_size = eval_batch_size eval_config.retain_original_image_additional_channels = True eval_input_fn = inputs.create_eval_input_fn( eval_config, configs['eval_input_configs'][0], model_config) features, labels = _make_initializable_iterator(eval_input_fn()).get_next() self.assertAllEqual([eval_batch_size, 300, 300, 4], features[fields.InputDataFields.image].shape.as_list()) self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype) self.assertAllEqual( [eval_batch_size, 300, 300, 3], features[fields.InputDataFields.original_image].shape.as_list()) self.assertEqual(tf.uint8, features[fields.InputDataFields.original_image].dtype) self.assertAllEqual([eval_batch_size, 300, 300, 1], features[ fields.InputDataFields.image_additional_channels].shape.as_list()) self.assertEqual( tf.uint8, features[fields.InputDataFields.image_additional_channels].dtype) self.assertAllEqual([eval_batch_size], features[inputs.HASH_KEY].shape.as_list()) self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype) self.assertAllEqual( [eval_batch_size, 100, 4], labels[fields.InputDataFields.groundtruth_boxes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_boxes].dtype) self.assertAllEqual( [eval_batch_size, 100, model_config.ssd.num_classes], labels[fields.InputDataFields.groundtruth_classes].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_classes].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_weights].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_weights].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_area].shape.as_list()) self.assertEqual(tf.float32, labels[fields.InputDataFields.groundtruth_area].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list()) self.assertEqual(tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype) self.assertAllEqual( [eval_batch_size, 100], labels[fields.InputDataFields.groundtruth_difficult].shape.as_list()) self.assertEqual(tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype) def test_predict_input(self): """Tests the predict input function.""" configs = _get_configs_for_model('ssd_inception_v2_pets') predict_input_fn = inputs.create_predict_input_fn( model_config=configs['model'], predict_input_config=configs['eval_input_configs'][0]) serving_input_receiver = predict_input_fn() image = serving_input_receiver.features[fields.InputDataFields.image] receiver_tensors = serving_input_receiver.receiver_tensors[ inputs.SERVING_FED_EXAMPLE_KEY] self.assertEqual([1, 300, 300, 3], image.shape.as_list()) self.assertEqual(tf.float32, image.dtype) self.assertEqual(tf.string, receiver_tensors.dtype) def test_predict_input_with_additional_channels(self): """Tests the predict input function with additional channels.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['eval_input_configs'][0].num_additional_channels = 2 predict_input_fn = inputs.create_predict_input_fn( model_config=configs['model'], predict_input_config=configs['eval_input_configs'][0]) serving_input_receiver = predict_input_fn() image = 
serving_input_receiver.features[fields.InputDataFields.image] receiver_tensors = serving_input_receiver.receiver_tensors[ inputs.SERVING_FED_EXAMPLE_KEY] # RGB + 2 additional channels = 5 channels. self.assertEqual([1, 300, 300, 5], image.shape.as_list()) self.assertEqual(tf.float32, image.dtype) self.assertEqual(tf.string, receiver_tensors.dtype) def test_error_with_bad_train_config(self): """Tests that a TypeError is raised with improper train config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 train_input_fn = inputs.create_train_input_fn( train_config=configs['eval_config'], # Expecting `TrainConfig`. train_input_config=configs['train_input_config'], model_config=configs['model']) with self.assertRaises(TypeError): train_input_fn() def test_error_with_bad_train_input_config(self): """Tests that a TypeError is raised with improper train input config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 train_input_fn = inputs.create_train_input_fn( train_config=configs['train_config'], train_input_config=configs['model'], # Expecting `InputReader`. model_config=configs['model']) with self.assertRaises(TypeError): train_input_fn() def test_error_with_bad_train_model_config(self): """Tests that a TypeError is raised with improper train model config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 train_input_fn = inputs.create_train_input_fn( train_config=configs['train_config'], train_input_config=configs['train_input_config'], model_config=configs['train_config']) # Expecting `DetectionModel`. with self.assertRaises(TypeError): train_input_fn() def test_error_with_bad_eval_config(self): """Tests that a TypeError is raised with improper eval config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 eval_input_fn = inputs.create_eval_input_fn( eval_config=configs['train_config'], # Expecting `EvalConfig`. eval_input_config=configs['eval_input_configs'][0], model_config=configs['model']) with self.assertRaises(TypeError): eval_input_fn() def test_error_with_bad_eval_input_config(self): """Tests that a TypeError is raised with improper eval input config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 eval_input_fn = inputs.create_eval_input_fn( eval_config=configs['eval_config'], eval_input_config=configs['model'], # Expecting `InputReader`. model_config=configs['model']) with self.assertRaises(TypeError): eval_input_fn() def test_error_with_bad_eval_model_config(self): """Tests that a TypeError is raised with improper eval model config.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['model'].ssd.num_classes = 37 eval_input_fn = inputs.create_eval_input_fn( eval_config=configs['eval_config'], eval_input_config=configs['eval_input_configs'][0], model_config=configs['eval_config']) # Expecting `DetectionModel`. 
with self.assertRaises(TypeError): eval_input_fn() def test_output_equal_in_replace_empty_string_with_random_number(self): string_placeholder = tf.placeholder(tf.string, shape=[]) replaced_string = inputs._replace_empty_string_with_random_number( string_placeholder) test_string = b'hello world' feed_dict = {string_placeholder: test_string} with self.test_session() as sess: out_string = sess.run(replaced_string, feed_dict=feed_dict) self.assertEqual(test_string, out_string) def test_output_is_integer_in_replace_empty_string_with_random_number(self): string_placeholder = tf.placeholder(tf.string, shape=[]) replaced_string = inputs._replace_empty_string_with_random_number( string_placeholder) empty_string = '' feed_dict = {string_placeholder: empty_string} with self.test_session() as sess: out_string = sess.run(replaced_string, feed_dict=feed_dict) is_integer = True try: # Test whether out_string is a string which represents an integer, the # casting below will throw an error if out_string is not castable to int. int(out_string) except ValueError: is_integer = False self.assertTrue(is_integer) def test_force_no_resize(self): """Tests the functionality of force_no_reisze option.""" configs = _get_configs_for_model('ssd_inception_v2_pets') configs['eval_config'].force_no_resize = True eval_input_fn = inputs.create_eval_input_fn( eval_config=configs['eval_config'], eval_input_config=configs['eval_input_configs'][0], model_config=configs['model'] ) train_input_fn = inputs.create_train_input_fn( train_config=configs['train_config'], train_input_config=configs['train_input_config'], model_config=configs['model'] ) features_train, _ = _make_initializable_iterator( train_input_fn()).get_next() features_eval, _ = _make_initializable_iterator( eval_input_fn()).get_next() images_train, images_eval = features_train['image'], features_eval['image'] self.assertEqual([1, None, None, 3], images_eval.shape.as_list()) self.assertEqual([24, 300, 300, 3], images_train.shape.as_list()) class DataAugmentationFnTest(test_case.TestCase): def test_apply_image_and_box_augmentation(self): data_augmentation_options = [ (preprocessor.resize_image, { 'new_height': 20, 'new_width': 20, 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR }), (preprocessor.scale_boxes_to_pixel_coordinates, {}), ] data_augmentation_fn = functools.partial( inputs.augment_input_data, data_augmentation_options=data_augmentation_options) def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)) } augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) return (augmented_tensor_dict[fields.InputDataFields.image], augmented_tensor_dict[fields.InputDataFields. 
groundtruth_boxes]) image, groundtruth_boxes = self.execute_cpu(graph_fn, []) self.assertAllEqual(image.shape, [20, 20, 3]) self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) def test_apply_image_and_box_augmentation_with_scores(self): data_augmentation_options = [ (preprocessor.resize_image, { 'new_height': 20, 'new_width': 20, 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR }), (preprocessor.scale_boxes_to_pixel_coordinates, {}), ] data_augmentation_fn = functools.partial( inputs.augment_input_data, data_augmentation_options=data_augmentation_options) def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1.0], np.float32)), fields.InputDataFields.groundtruth_weights: tf.constant(np.array([0.8], np.float32)), } augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) return (augmented_tensor_dict[fields.InputDataFields.image], augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], augmented_tensor_dict[fields.InputDataFields.groundtruth_classes], augmented_tensor_dict[fields.InputDataFields.groundtruth_weights]) (image, groundtruth_boxes, groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, []) self.assertAllEqual(image.shape, [20, 20, 3]) self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]]) self.assertAllClose(groundtruth_classes.shape, [1.0]) self.assertAllClose(groundtruth_weights, [0.8]) def test_include_masks_in_data_augmentation(self): data_augmentation_options = [ (preprocessor.resize_image, { 'new_height': 20, 'new_width': 20, 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR }) ] data_augmentation_fn = functools.partial( inputs.augment_input_data, data_augmentation_options=data_augmentation_options) def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), fields.InputDataFields.groundtruth_instance_masks: tf.constant(np.zeros([2, 10, 10], np.uint8)), fields.InputDataFields.groundtruth_instance_mask_weights: tf.constant([1.0, 0.0], np.float32) } augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) return (augmented_tensor_dict[fields.InputDataFields.image], augmented_tensor_dict[fields.InputDataFields. groundtruth_instance_masks], augmented_tensor_dict[fields.InputDataFields. 
groundtruth_instance_mask_weights]) image, masks, mask_weights = self.execute_cpu(graph_fn, []) self.assertAllEqual(image.shape, [20, 20, 3]) self.assertAllEqual(masks.shape, [2, 20, 20]) self.assertAllClose(mask_weights, [1.0, 0.0]) def test_include_keypoints_in_data_augmentation(self): data_augmentation_options = [ (preprocessor.resize_image, { 'new_height': 20, 'new_width': 20, 'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR }), (preprocessor.scale_boxes_to_pixel_coordinates, {}), ] data_augmentation_fn = functools.partial( inputs.augment_input_data, data_augmentation_options=data_augmentation_options) def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(10, 10, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)), fields.InputDataFields.groundtruth_keypoints: tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32)) } augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict) return (augmented_tensor_dict[fields.InputDataFields.image], augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes], augmented_tensor_dict[fields.InputDataFields. groundtruth_keypoints]) image, boxes, keypoints = self.execute_cpu(graph_fn, []) self.assertAllEqual(image.shape, [20, 20, 3]) self.assertAllClose(boxes, [[10, 10, 20, 20]]) self.assertAllClose(keypoints, [[[10, 20], [10, 10]]]) def _fake_model_preprocessor_fn(image): return (image, tf.expand_dims(tf.shape(image)[1:], axis=0)) def _fake_image_resizer_fn(image, mask): return (image, mask, tf.shape(image)) def _fake_resize50_preprocess_fn(image): image = image[0] image, shape = preprocessor.resize_to_range( image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True) return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0) class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase): def test_combine_additional_channels_if_present(self): image = np.random.rand(4, 4, 3).astype(np.float32) additional_channels = np.random.rand(4, 4, 2).astype(np.float32) def graph_fn(image, additional_channels): tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.image_additional_channels: additional_channels, fields.InputDataFields.groundtruth_classes: tf.constant([1, 1], tf.int32) } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=1) out_tensors = input_transformation_fn(tensor_dict=tensor_dict) return out_tensors[fields.InputDataFields.image] out_image = self.execute_cpu(graph_fn, [image, additional_channels]) self.assertAllEqual(out_image.dtype, tf.float32) self.assertAllEqual(out_image.shape, [4, 4, 5]) self.assertAllClose(out_image, np.concatenate((image, additional_channels), axis=2)) def test_use_multiclass_scores_when_present(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3). 
astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.multiclass_scores: tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)) } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=3, use_multiclass_scores=True) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return transformed_inputs[fields.InputDataFields.groundtruth_classes] groundtruth_classes = self.execute_cpu(graph_fn, []) self.assertAllClose( np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32), groundtruth_classes) @unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour ' 'in TF 2.X')) def test_use_multiclass_scores_when_not_present(self): def graph_fn(): zero_num_elements = tf.random.uniform([], minval=0, maxval=1, dtype=tf.int32) tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)) } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=3, use_multiclass_scores=True) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return transformed_inputs[fields.InputDataFields.groundtruth_classes] groundtruth_classes = self.execute_cpu(graph_fn, []) self.assertAllClose( np.array([[0, 1, 0], [0, 0, 1]], np.float32), groundtruth_classes) @parameterized.parameters( {'labeled_classes': [1, 2]}, {'labeled_classes': []}, {'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class ) def test_use_labeled_classes(self, labeled_classes): def compute_fn(image, groundtruth_boxes, groundtruth_classes, groundtruth_labeled_classes): tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: groundtruth_boxes, fields.InputDataFields.groundtruth_classes: groundtruth_classes, fields.InputDataFields.groundtruth_labeled_classes: groundtruth_labeled_classes } input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=3) return input_transformation_fn(tensor_dict=tensor_dict) image = np.random.rand(4, 4, 3).astype(np.float32) groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32) groundtruth_classes = np.array([1, 2], np.int32) groundtruth_labeled_classes = np.array(labeled_classes, np.int32) transformed_inputs = self.execute_cpu(compute_fn, [ image, groundtruth_boxes, groundtruth_classes, groundtruth_labeled_classes ]) if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]: transformed_labeled_classes = [1, 1, 0] elif not labeled_classes: transformed_labeled_classes = [1, 1, 1] else: logging.exception('Unexpected labeled_classes %r', labeled_classes) self.assertAllEqual( np.array(transformed_labeled_classes, np.float32), transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes]) def test_returns_correct_class_label_encodings(self): def graph_fn(): 
tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_classes], transformed_inputs[fields.InputDataFields. groundtruth_confidences]) (groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn, []) self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) def test_returns_correct_labels_with_unrecognized_class(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant( np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.groundtruth_area: tf.constant(np.array([.5, .4, .3])), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, -1, 1], np.int32)), fields.InputDataFields.groundtruth_keypoints: tf.constant( np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]], np.float32)), fields.InputDataFields.groundtruth_keypoint_visibilities: tf.constant([[True, True], [False, False], [True, True]]), fields.InputDataFields.groundtruth_instance_masks: tf.constant(np.random.rand(3, 4, 4).astype(np.float32)), fields.InputDataFields.groundtruth_is_crowd: tf.constant([False, True, False]), fields.InputDataFields.groundtruth_difficult: tf.constant(np.array([0, 0, 1], np.int32)) } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_classes], transformed_inputs[fields.InputDataFields.num_groundtruth_boxes], transformed_inputs[fields.InputDataFields.groundtruth_area], transformed_inputs[fields.InputDataFields. groundtruth_confidences], transformed_inputs[fields.InputDataFields.groundtruth_boxes], transformed_inputs[fields.InputDataFields.groundtruth_keypoints], transformed_inputs[fields.InputDataFields. groundtruth_keypoint_visibilities], transformed_inputs[fields.InputDataFields. 
groundtruth_instance_masks], transformed_inputs[fields.InputDataFields.groundtruth_is_crowd], transformed_inputs[fields.InputDataFields.groundtruth_difficult]) (groundtruth_classes, num_groundtruth_boxes, groundtruth_area, groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints, groundtruth_keypoint_visibilities, groundtruth_instance_masks, groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn, []) self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) self.assertAllEqual(num_groundtruth_boxes, 2) self.assertAllClose(groundtruth_area, [.5, .3]) self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]]) self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]]) self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]]) self.assertAllEqual(groundtruth_keypoint_visibilities, [[True, True], [True, True]]) self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4]) self.assertAllEqual(groundtruth_is_crowd, [False, False]) self.assertAllEqual(groundtruth_difficult, [0, 1]) def test_returns_correct_merged_boxes(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes, merge_multiple_boxes=True) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], transformed_inputs[fields.InputDataFields.groundtruth_classes], transformed_inputs[fields.InputDataFields. groundtruth_confidences], transformed_inputs[fields.InputDataFields.num_groundtruth_boxes]) (groundtruth_boxes, groundtruth_classes, groundtruth_confidences, num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) self.assertAllClose( groundtruth_boxes, [[.5, .5, 1., 1.]]) self.assertAllClose( groundtruth_classes, [[1, 0, 1]]) self.assertAllClose( groundtruth_confidences, [[1, 0, 1]]) self.assertAllClose( num_groundtruth_boxes, 1) def test_returns_correct_groundtruth_confidences_when_input_present(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)), fields.InputDataFields.groundtruth_confidences: tf.constant(np.array([1.0, -1.0], np.float32)) } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_classes], transformed_inputs[fields.InputDataFields. 
groundtruth_confidences]) groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn, []) self.assertAllClose( groundtruth_classes, [[0, 0, 1], [1, 0, 0]]) self.assertAllClose( groundtruth_confidences, [[0, 0, 1], [-1, 0, 0]]) def test_returns_resized_masks(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).astype(np.float32)), fields.InputDataFields.groundtruth_instance_masks: tf.constant(np.random.rand(2, 4, 4).astype(np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)), fields.InputDataFields.original_image_spatial_shape: tf.constant(np.array([4, 4], np.int32)) } def fake_image_resizer_fn(image, masks=None): resized_image = tf.image.resize_images(image, [8, 8]) results = [resized_image] if masks is not None: resized_masks = tf.transpose( tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]), [2, 0, 1]) results.append(resized_masks) results.append(tf.shape(resized_image)) return results num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=fake_image_resizer_fn, num_classes=num_classes, retain_original_image=True) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.original_image], transformed_inputs[fields.InputDataFields. original_image_spatial_shape], transformed_inputs[fields.InputDataFields. groundtruth_instance_masks]) (original_image, original_image_shape, groundtruth_instance_masks) = self.execute_cpu(graph_fn, []) self.assertEqual(original_image.dtype, np.uint8) self.assertAllEqual(original_image_shape, [4, 4]) self.assertAllEqual(original_image.shape, [8, 8, 3]) self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8]) def test_applies_model_preprocess_fn_to_image_tensor(self): np_image = np.random.randint(256, size=(4, 4, 3)) def graph_fn(image): tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } def fake_model_preprocessor_fn(image): return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0)) num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.image], transformed_inputs[fields.InputDataFields.true_image_shape]) image, true_image_shape = self.execute_cpu(graph_fn, [np_image]) self.assertAllClose(image, np_image / 255.) 
self.assertAllClose(true_image_shape, [4, 4, 3]) def test_applies_data_augmentation_fn_to_tensor_dict(self): np_image = np.random.randint(256, size=(4, 4, 3)) def graph_fn(image): tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } def add_one_data_augmentation_fn(tensor_dict): return {key: value + 1 for key, value in tensor_dict.items()} num_classes = 4 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes, data_augmentation_fn=add_one_data_augmentation_fn) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.image], transformed_inputs[fields.InputDataFields.groundtruth_classes]) image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image]) self.assertAllEqual(image, np_image + 1) self.assertAllEqual( groundtruth_classes, [[0, 0, 0, 1], [0, 1, 0, 0]]) def test_applies_data_augmentation_fn_before_model_preprocess_fn(self): np_image = np.random.randint(256, size=(4, 4, 3)) def graph_fn(image): tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_classes: tf.constant(np.array([3, 1], np.int32)) } def mul_two_model_preprocessor_fn(image): return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0)) def add_five_to_image_data_augmentation_fn(tensor_dict): tensor_dict[fields.InputDataFields.image] += 5 return tensor_dict num_classes = 4 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=mul_two_model_preprocessor_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes, data_augmentation_fn=add_five_to_image_data_augmentation_fn) transformed_inputs = input_transformation_fn(tensor_dict) return transformed_inputs[fields.InputDataFields.image] image = self.execute_cpu(graph_fn, [np_image]) self.assertAllEqual(image, (np_image + 5) * 2) def test_resize_with_padding(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)), fields.InputDataFields.groundtruth_keypoints: tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]), } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_resize50_preprocess_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes,) transformed_inputs = input_transformation_fn(tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_boxes], transformed_inputs[fields.InputDataFields.groundtruth_keypoints]) groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, []) self.assertAllClose( groundtruth_boxes, [[.5, .25, 1., .5], [.0, .0, .5, .25]]) self.assertAllClose( groundtruth_keypoints, [[[.1, .1]], [[.3, .2]]]) def test_groundtruth_keypoint_weights(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)), fields.InputDataFields.groundtruth_keypoints: tf.constant([[[0.1, 0.2], [0.3, 0.4]], 
[[0.5, 0.6], [0.7, 0.8]]]), fields.InputDataFields.groundtruth_keypoint_visibilities: tf.constant([[True, False], [True, True]]), } num_classes = 3 keypoint_type_weight = [1.0, 2.0] input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_resize50_preprocess_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes, keypoint_type_weight=keypoint_type_weight) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], transformed_inputs[fields.InputDataFields. groundtruth_keypoint_weights]) groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( graph_fn, []) self.assertAllClose( groundtruth_keypoints, [[[0.1, 0.1], [0.3, 0.2]], [[0.5, 0.3], [0.7, 0.4]]]) self.assertAllClose( groundtruth_keypoint_weights, [[1.0, 0.0], [1.0, 2.0]]) def test_groundtruth_keypoint_weights_default(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)), fields.InputDataFields.groundtruth_keypoints: tf.constant([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]]), } num_classes = 3 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_resize50_preprocess_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints], transformed_inputs[fields.InputDataFields. groundtruth_keypoint_weights]) groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu( graph_fn, []) self.assertAllClose( groundtruth_keypoints, [[[0.1, 0.1], [0.3, 0.2]], [[0.5, 0.3], [0.7, 0.4]]]) self.assertAllClose( groundtruth_keypoint_weights, [[1.0, 1.0], [1.0, 1.0]]) def test_groundtruth_dense_pose(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)), fields.InputDataFields.groundtruth_dp_num_points: tf.constant([0, 2], dtype=tf.int32), fields.InputDataFields.groundtruth_dp_part_ids: tf.constant([[0, 0], [4, 23]], dtype=tf.int32), fields.InputDataFields.groundtruth_dp_surface_coords: tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], [[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]], dtype=tf.float32), } num_classes = 1 input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_resize50_preprocess_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) transformed_dp_num_points = transformed_inputs[ fields.InputDataFields.groundtruth_dp_num_points] transformed_dp_part_ids = transformed_inputs[ fields.InputDataFields.groundtruth_dp_part_ids] transformed_dp_surface_coords = transformed_inputs[ fields.InputDataFields.groundtruth_dp_surface_coords] return (transformed_dp_num_points, transformed_dp_part_ids, transformed_dp_surface_coords) dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu( graph_fn, []) 
self.assertAllEqual(dp_num_points, [0, 2]) self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]]) self.assertAllClose( dp_surface_coords, [[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], [[0.1, 0.1, 0.3, 0.4,], [0.6, 0.4, 0.6, 0.7,]]]) def test_groundtruth_keypoint_depths(self): def graph_fn(): tensor_dict = { fields.InputDataFields.image: tf.constant(np.random.rand(100, 50, 3).astype(np.float32)), fields.InputDataFields.groundtruth_boxes: tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]], np.float32)), fields.InputDataFields.groundtruth_classes: tf.constant(np.array([1, 2], np.int32)), fields.InputDataFields.groundtruth_keypoints: tf.constant([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]]), fields.InputDataFields.groundtruth_keypoint_visibilities: tf.constant([[True, False], [True, True]]), fields.InputDataFields.groundtruth_keypoint_depths: tf.constant([[1.0, 0.9], [0.8, 0.7]]), fields.InputDataFields.groundtruth_keypoint_depth_weights: tf.constant([[0.7, 0.8], [0.9, 1.0]]), } num_classes = 3 keypoint_type_weight = [1.0, 2.0] input_transformation_fn = functools.partial( inputs.transform_input_data, model_preprocess_fn=_fake_resize50_preprocess_fn, image_resizer_fn=_fake_image_resizer_fn, num_classes=num_classes, keypoint_type_weight=keypoint_type_weight) transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict) return (transformed_inputs[ fields.InputDataFields.groundtruth_keypoint_depths], transformed_inputs[ fields.InputDataFields.groundtruth_keypoint_depth_weights]) keypoint_depths, keypoint_depth_weights = self.execute_cpu(graph_fn, []) self.assertAllClose( keypoint_depths, [[1.0, 0.9], [0.8, 0.7]]) self.assertAllClose( keypoint_depth_weights, [[0.7, 0.8], [0.9, 1.0]]) class PadInputDataToStaticShapesFnTest(test_case.TestCase): def test_pad_images_boxes_and_classes(self): input_tensor_dict = { fields.InputDataFields.image: tf.random.uniform([3, 3, 3]), fields.InputDataFields.groundtruth_boxes: tf.random.uniform([2, 4]), fields.InputDataFields.groundtruth_classes: tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32), fields.InputDataFields.true_image_shape: tf.constant([3, 3, 3]), fields.InputDataFields.original_image_spatial_shape: tf.constant([3, 3]) } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), [5, 6, 3]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.true_image_shape] .shape.as_list(), [3]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape] .shape.as_list(), [2]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_boxes] .shape.as_list(), [3, 4]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_classes] .shape.as_list(), [3, 3]) def test_clip_boxes_and_classes(self): def graph_fn(): input_tensor_dict = { fields.InputDataFields.groundtruth_boxes: tf.random.uniform([5, 4]), fields.InputDataFields.groundtruth_classes: tf.random.uniform([2, 3], maxval=10, dtype=tf.int32), fields.InputDataFields.num_groundtruth_boxes: tf.constant(5) } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes], padded_tensor_dict[fields.InputDataFields.groundtruth_classes], 
padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes]) (groundtruth_boxes, groundtruth_classes, num_groundtruth_boxes) = self.execute_cpu(graph_fn, []) self.assertAllEqual(groundtruth_boxes.shape, [3, 4]) self.assertAllEqual(groundtruth_classes.shape, [3, 3]) self.assertEqual(num_groundtruth_boxes, 3) def test_images_and_additional_channels(self): input_tensor_dict = { fields.InputDataFields.image: test_utils.image_with_dynamic_shape(4, 3, 5), fields.InputDataFields.image_additional_channels: test_utils.image_with_dynamic_shape(4, 3, 2), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) # pad_input_data_to_static_shape assumes that image is already concatenated # with additional channels. self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), [5, 6, 5]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image_additional_channels] .shape.as_list(), [5, 6, 2]) def test_images_and_additional_channels_errors(self): input_tensor_dict = { fields.InputDataFields.image: test_utils.image_with_dynamic_shape(10, 10, 3), fields.InputDataFields.image_additional_channels: test_utils.image_with_dynamic_shape(10, 10, 2), fields.InputDataFields.original_image: test_utils.image_with_dynamic_shape(10, 10, 3), } with self.assertRaises(ValueError): _ = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) def test_gray_images(self): input_tensor_dict = { fields.InputDataFields.image: test_utils.image_with_dynamic_shape(4, 4, 1), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), [5, 6, 1]) def test_gray_images_and_additional_channels(self): input_tensor_dict = { fields.InputDataFields.image: test_utils.image_with_dynamic_shape(4, 4, 3), fields.InputDataFields.image_additional_channels: test_utils.image_with_dynamic_shape(4, 4, 2), } # pad_input_data_to_static_shape assumes that image is already concatenated # with additional channels. 
padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image].shape.as_list(), [5, 6, 3]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.image_additional_channels] .shape.as_list(), [5, 6, 2]) def test_keypoints(self): keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4) visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0, maxval=2, dtype=tf.int32), tf.bool) input_tensor_dict = { fields.InputDataFields.groundtruth_keypoints: test_utils.keypoints_with_dynamic_shape(10, 16, 4), fields.InputDataFields.groundtruth_keypoint_visibilities: visibilities } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints] .shape.as_list(), [3, 16, 4]) self.assertAllEqual( padded_tensor_dict[ fields.InputDataFields.groundtruth_keypoint_visibilities] .shape.as_list(), [3, 16]) def test_dense_pose(self): input_tensor_dict = { fields.InputDataFields.groundtruth_dp_num_points: tf.constant([0, 2], dtype=tf.int32), fields.InputDataFields.groundtruth_dp_part_ids: tf.constant([[0, 0], [4, 23]], dtype=tf.int32), fields.InputDataFields.groundtruth_dp_surface_coords: tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]], [[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]], dtype=tf.float32), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=1, spatial_image_shape=[128, 128], max_dp_points=200) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] .shape.as_list(), [3]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] .shape.as_list(), [3, 200]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords] .shape.as_list(), [3, 200, 4]) def test_pad_input_data_to_static_shapes_for_trackid(self): input_tensor_dict = { fields.InputDataFields.groundtruth_track_ids: tf.constant([0, 1], dtype=tf.int32), } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=1, spatial_image_shape=[128, 128]) self.assertAllEqual( padded_tensor_dict[fields.InputDataFields.groundtruth_track_ids] .shape.as_list(), [3]) def test_context_features(self): context_memory_size = 8 context_feature_length = 10 max_num_context_features = 20 def graph_fn(): input_tensor_dict = { fields.InputDataFields.context_features: tf.ones([context_memory_size, context_feature_length]), fields.InputDataFields.context_feature_length: tf.constant(context_feature_length) } padded_tensor_dict = inputs.pad_input_data_to_static_shapes( tensor_dict=input_tensor_dict, max_num_boxes=3, num_classes=3, spatial_image_shape=[5, 6], max_num_context_features=max_num_context_features, context_feature_length=context_feature_length) self.assertAllEqual( padded_tensor_dict[ fields.InputDataFields.context_features].shape.as_list(), [max_num_context_features, context_feature_length]) return padded_tensor_dict[fields.InputDataFields.valid_context_size] valid_context_size = self.execute_cpu(graph_fn, []) self.assertEqual(valid_context_size, context_memory_size) class NegativeSizeTest(test_case.TestCase): """Test for inputs and related funcitons.""" def 
test_negative_size_error(self): """Test that error is raised for negative size boxes.""" def graph_fn(): tensors = { fields.InputDataFields.image: tf.zeros((128, 128, 3)), fields.InputDataFields.groundtruth_classes: tf.constant([1, 1], tf.int32), fields.InputDataFields.groundtruth_boxes: tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32) } tensors = inputs.transform_input_data( tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, num_classes=10) return tensors[fields.InputDataFields.groundtruth_boxes] with self.assertRaises(tf.errors.InvalidArgumentError): self.execute_cpu(graph_fn, []) def test_negative_size_no_assert(self): """Test that negative size boxes are filtered out without assert. This test simulates the behaviour when we run on TPU and Assert ops are not supported. """ tensors = { fields.InputDataFields.image: tf.zeros((128, 128, 3)), fields.InputDataFields.groundtruth_classes: tf.constant([1, 1], tf.int32), fields.InputDataFields.groundtruth_boxes: tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]], tf.float32) } with mock.patch.object(tf, 'Assert') as tf_assert: tf_assert.return_value = tf.no_op() tensors = inputs.transform_input_data( tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn, num_classes=10) self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes], [[0.5, 0.5, 0.6, 0.6]]) if __name__ == '__main__': tf.test.main()
78,081
43.491168
86
py
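For reference, the augmentation wiring exercised by DataAugmentationFnTest in the file above can be reproduced outside the test harness roughly as follows. This is a minimal sketch: the toy image, box, and 20x20 target size are illustrative values only, while the functions used (inputs.augment_input_data, preprocessor.resize_image, preprocessor.scale_boxes_to_pixel_coordinates) are the same ones the tests call.

# Minimal sketch mirroring DataAugmentationFnTest; toy sizes/values are illustrative only.
import functools

import numpy as np
import tensorflow.compat.v1 as tf

from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields

data_augmentation_options = [
    (preprocessor.resize_image,
     {'new_height': 20, 'new_width': 20,
      'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR}),
    (preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
    inputs.augment_input_data,
    data_augmentation_options=data_augmentation_options)

tensor_dict = {
    fields.InputDataFields.image:
        tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
    fields.InputDataFields.groundtruth_boxes:
        tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
# After augmentation the image is resized to [20, 20, 3] and the boxes are
# rescaled to pixel coordinates, matching the assertions in the tests above.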
models
models-master/research/object_detection/export_tflite_graph_tf2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Exports TF2 detection SavedModel for conversion to TensorFlow Lite. Link to the TF2 Detection Zoo: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md The output folder will contain an intermediate SavedModel that can be used with the TfLite converter. NOTE: This only supports SSD meta-architectures for now. One input: image: a float32 tensor of shape[1, height, width, 3] containing the *normalized* input image. NOTE: See the `preprocess` function defined in the feature extractor class in the object_detection/models directory. Four Outputs: detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box locations detection_classes: a float32 tensor of shape [1, num_boxes] with class indices detection_scores: a float32 tensor of shape [1, num_boxes] with class scores num_boxes: a float32 tensor of size 1 containing the number of detected boxes Example Usage: -------------- python object_detection/export_tflite_graph_tf2.py \ --pipeline_config_path path/to/ssd_model/pipeline.config \ --trained_checkpoint_dir path/to/ssd_model/checkpoint \ --output_directory path/to/exported_model_directory The expected output SavedModel would be in the directory path/to/exported_model_directory (which is created if it does not exist). Config overrides (see the `config_override` flag) are text protobufs (also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override certain fields in the provided pipeline_config_path. These are useful for making small changes to the inference graph that differ from the training or eval config. 
Example Usage 1 (in which we change the NMS iou_threshold to be 0.5 and NMS score_threshold to be 0.0): python object_detection/export_tflite_model_tf2.py \ --pipeline_config_path path/to/ssd_model/pipeline.config \ --trained_checkpoint_dir path/to/ssd_model/checkpoint \ --output_directory path/to/exported_model_directory --config_override " \ model{ \ ssd{ \ post_processing { \ batch_non_max_suppression { \ score_threshold: 0.0 \ iou_threshold: 0.5 \ } \ } \ } \ } \ " Example Usage 2 (export CenterNet model for keypoint estimation task with fixed shape resizer and customized input resolution): python object_detection/export_tflite_model_tf2.py \ --pipeline_config_path path/to/ssd_model/pipeline.config \ --trained_checkpoint_dir path/to/ssd_model/checkpoint \ --output_directory path/to/exported_model_directory \ --keypoint_label_map_path path/to/label_map.txt \ --max_detections 10 \ --centernet_include_keypoints true \ --config_override " \ model{ \ center_net { \ image_resizer { \ fixed_shape_resizer { \ height: 320 \ width: 320 \ } \ } \ } \ }" \ """ from absl import app from absl import flags import tensorflow.compat.v2 as tf from google.protobuf import text_format from object_detection import export_tflite_graph_lib_tf2 from object_detection.protos import pipeline_pb2 tf.enable_v2_behavior() FLAGS = flags.FLAGS flags.DEFINE_string( 'pipeline_config_path', None, 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file.') flags.DEFINE_string('trained_checkpoint_dir', None, 'Path to trained checkpoint directory') flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string( 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') flags.DEFINE_integer('max_detections', 10, 'Maximum number of detections (boxes) to return.') # SSD-specific flags flags.DEFINE_bool( 'ssd_use_regular_nms', False, 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS ' '(Default false).') # CenterNet-specific flags flags.DEFINE_bool( 'centernet_include_keypoints', False, 'Whether to export the predicted keypoint tensors. Only CenterNet model' ' supports this flag.' ) flags.DEFINE_string( 'keypoint_label_map_path', None, 'Path of the label map used by CenterNet keypoint estimation task. If' ' provided, the label map path in the pipeline config will be replaced by' ' this one. Note that it is only used when exporting CenterNet model for' ' keypoint estimation task.' ) def main(argv): del argv # Unused. flags.mark_flag_as_required('pipeline_config_path') flags.mark_flag_as_required('trained_checkpoint_dir') flags.mark_flag_as_required('output_directory') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.io.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: text_format.Parse(f.read(), pipeline_config) override_config = pipeline_pb2.TrainEvalPipelineConfig() text_format.Parse(FLAGS.config_override, override_config) pipeline_config.MergeFrom(override_config) export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config, FLAGS.trained_checkpoint_dir, FLAGS.output_directory, FLAGS.max_detections, FLAGS.ssd_use_regular_nms, FLAGS.centernet_include_keypoints, FLAGS.keypoint_label_map_path) if __name__ == '__main__': app.run(main)
6,141
37.149068
101
py
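The docstring of the exporter above notes that the output directory holds an intermediate SavedModel meant for the TFLite converter. A minimal follow-up sketch is shown below; it assumes the SavedModel was written to a saved_model subdirectory of --output_directory (verify against the actual export output) and that the default post-processing settings are acceptable.

# Sketch of converting the exported intermediate SavedModel with the TFLite
# converter. The saved_model path is an assumption about the export layout.
import tensorflow as tf

saved_model_dir = 'path/to/exported_model_directory/saved_model'  # assumed layout
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
# Optional: enable default optimizations (post-training weight quantization).
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with open('path/to/exported_model_directory/model.tflite', 'wb') as f:
  f.write(tflite_model)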
models
models-master/research/object_detection/export_tflite_ssd_graph.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Exports an SSD detection model to use with tf-lite. Outputs file: * A tflite compatible frozen graph - $output_directory/tflite_graph.pb The exported graph has the following input and output nodes. Inputs: 'normalized_input_image_tensor': a float32 tensor of shape [1, height, width, 3] containing the normalized input image. Note that the height and width must be compatible with the height and width configured in the fixed_shape_image resizer options in the pipeline config proto. In floating point Mobilenet model, 'normalized_image_tensor' has values between [-1,1). This typically means mapping each pixel (linearly) to a value between [-1, 1]. Input image values between 0 and 255 are scaled by (1/128.0) and then a value of -1 is added to them to ensure the range is [-1,1). In quantized Mobilenet model, 'normalized_image_tensor' has values between [0, 255]. In general, see the `preprocess` function defined in the feature extractor class in the object_detection/models directory. Outputs: If add_postprocessing_op is true: frozen graph adds a TFLite_Detection_PostProcess custom op node has four outputs: detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box locations detection_classes: a float32 tensor of shape [1, num_boxes] with class indices detection_scores: a float32 tensor of shape [1, num_boxes] with class scores num_boxes: a float32 tensor of size 1 containing the number of detected boxes else: the graph has two outputs: 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] containing the encoded box predictions. 'raw_outputs/class_predictions': a float32 tensor of shape [1, num_anchors, num_classes] containing the class scores for each anchor after applying score conversion. Example Usage: -------------- python object_detection/export_tflite_ssd_graph.py \ --pipeline_config_path path/to/ssd_mobilenet.config \ --trained_checkpoint_prefix path/to/model.ckpt \ --output_directory path/to/exported_model_directory The expected output would be in the directory path/to/exported_model_directory (which is created if it does not exist) with contents: - tflite_graph.pbtxt - tflite_graph.pb Config overrides (see the `config_override` flag) are text protobufs (also of type pipeline_pb2.TrainEvalPipelineConfig) which are used to override certain fields in the provided pipeline_config_path. These are useful for making small changes to the inference graph that differ from the training or eval config. 
Example Usage (in which we change the NMS iou_threshold to be 0.5 and NMS score_threshold to be 0.0): python object_detection/export_tflite_ssd_graph.py \ --pipeline_config_path path/to/ssd_mobilenet.config \ --trained_checkpoint_prefix path/to/model.ckpt \ --output_directory path/to/exported_model_directory --config_override " \ model{ \ ssd{ \ post_processing { \ batch_non_max_suppression { \ score_threshold: 0.0 \ iou_threshold: 0.5 \ } \ } \ } \ } \ " """ import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection import export_tflite_ssd_graph_lib from object_detection.protos import pipeline_pb2 flags = tf.app.flags flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string( 'pipeline_config_path', None, 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file.') flags.DEFINE_string('trained_checkpoint_prefix', None, 'Checkpoint prefix.') flags.DEFINE_integer('max_detections', 10, 'Maximum number of detections (boxes) to show.') flags.DEFINE_integer('max_classes_per_detection', 1, 'Maximum number of classes to output per detection box.') flags.DEFINE_integer( 'detections_per_class', 100, 'Number of anchors used per class in Regular Non-Max-Suppression.') flags.DEFINE_bool('add_postprocessing_op', True, 'Add TFLite custom op for postprocessing to the graph.') flags.DEFINE_bool( 'use_regular_nms', False, 'Flag to set postprocessing op to use Regular NMS instead of Fast NMS.') flags.DEFINE_string( 'config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') FLAGS = flags.FLAGS def main(argv): del argv # Unused. flags.mark_flag_as_required('output_directory') flags.mark_flag_as_required('pipeline_config_path') flags.mark_flag_as_required('trained_checkpoint_prefix') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as f: text_format.Merge(f.read(), pipeline_config) text_format.Merge(FLAGS.config_override, pipeline_config) export_tflite_ssd_graph_lib.export_tflite_graph( pipeline_config, FLAGS.trained_checkpoint_prefix, FLAGS.output_directory, FLAGS.add_postprocessing_op, FLAGS.max_detections, FLAGS.max_classes_per_detection, use_regular_nms=FLAGS.use_regular_nms) if __name__ == '__main__': tf.app.run(main)
5,866
39.743056
80
py
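As with the TF2 variant, the frozen tflite_graph.pb produced by the exporter above is typically fed to the TFLite converter next. The sketch below uses the TF1 from_frozen_graph API; the 1x300x300x3 input shape and the four TFLite_Detection_PostProcess output names are assumptions that depend on the pipeline's fixed_shape_resizer and on add_postprocessing_op being left at its default of True, so verify them against the exported graph.

# Sketch of converting tflite_graph.pb with the TF1 TFLite converter.
# Input shape and output tensor names are assumptions; verify against the
# exported graph (they depend on the image resizer config and the
# add_postprocessing_op flag).
import tensorflow.compat.v1 as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='path/to/exported_model_directory/tflite_graph.pb',
    input_arrays=['normalized_input_image_tensor'],
    output_arrays=[
        'TFLite_Detection_PostProcess',
        'TFLite_Detection_PostProcess:1',
        'TFLite_Detection_PostProcess:2',
        'TFLite_Detection_PostProcess:3',
    ],
    input_shapes={'normalized_input_image_tensor': [1, 300, 300, 3]})
# The post-processing node is a TFLite custom op, so custom ops must be allowed.
converter.allow_custom_ops = True
tflite_model = converter.convert()
with open('path/to/exported_model_directory/detect.tflite', 'wb') as f:
  f.write(tflite_model)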
models
models-master/research/object_detection/inputs.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model input function for tf-learn object detection model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from object_detection.builders import dataset_builder from object_detection.builders import image_resizer_builder from object_detection.builders import model_builder from object_detection.builders import preprocessor_builder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import densepose_ops from object_detection.core import keypoint_ops from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.protos import eval_pb2 from object_detection.protos import image_resizer_pb2 from object_detection.protos import input_reader_pb2 from object_detection.protos import model_pb2 from object_detection.protos import train_pb2 from object_detection.utils import config_util from object_detection.utils import ops as util_ops from object_detection.utils import shape_utils HASH_KEY = 'hash' HASH_BINS = 1 << 31 SERVING_FED_EXAMPLE_KEY = 'serialized_example' _LABEL_OFFSET = 1 # A map of names to methods that help build the input pipeline. INPUT_BUILDER_UTIL_MAP = { 'dataset_build': dataset_builder.build, 'model_build': model_builder.build, } def _multiclass_scores_or_one_hot_labels(multiclass_scores, groundtruth_boxes, groundtruth_classes, num_classes): """Returns one-hot encoding of classes when multiclass_scores is empty.""" # Replace groundtruth_classes tensor with multiclass_scores tensor when its # non-empty. If multiclass_scores is empty fall back on groundtruth_classes # tensor. def true_fn(): return tf.reshape(multiclass_scores, [tf.shape(groundtruth_boxes)[0], num_classes]) def false_fn(): return tf.one_hot(groundtruth_classes, num_classes) return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn) def convert_labeled_classes_to_k_hot(groundtruth_labeled_classes, num_classes, map_empty_to_ones=False): """Returns k-hot encoding of the labeled classes. If map_empty_to_ones is enabled and the input labeled_classes is empty, this function assumes all classes are exhaustively labeled, thus returning an all-one encoding. Args: groundtruth_labeled_classes: a Tensor holding a sparse representation of labeled classes. num_classes: an integer representing the number of classes map_empty_to_ones: boolean (default: False). Set this to be True to default to an all-ones result if given an empty `groundtruth_labeled_classes`. Returns: A k-hot (and 0-indexed) tensor representation of `groundtruth_labeled_classes`. 
""" # If the input labeled_classes is empty, it assumes all classes are # exhaustively labeled, thus returning an all-one encoding. def true_fn(): return tf.sparse_to_dense( groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes], tf.constant(1, dtype=tf.float32), validate_indices=False) def false_fn(): return tf.ones(num_classes, dtype=tf.float32) if map_empty_to_ones: return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn) return true_fn() def _remove_unrecognized_classes(class_ids, unrecognized_label): """Returns class ids with unrecognized classes filtered out.""" recognized_indices = tf.squeeze( tf.where(tf.greater(class_ids, unrecognized_label)), -1) return tf.gather(class_ids, recognized_indices) def assert_or_prune_invalid_boxes(boxes): """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin). When the hardware supports assertions, the function raises an error when boxes have an invalid size. If assertions are not supported (e.g. on TPU), boxes with invalid sizes are filtered out. Args: boxes: float tensor of shape [num_boxes, 4] Returns: boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes filtered out. Raises: tf.errors.InvalidArgumentError: When we detect boxes with invalid size. This is not supported on TPUs. """ ymin, xmin, ymax, xmax = tf.split(boxes, num_or_size_splits=4, axis=1) height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax]) width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax]) with tf.control_dependencies([height_check, width_check]): boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1) boxlist = box_list.BoxList(boxes_tensor) # TODO(b/149221748) Remove pruning when XLA supports assertions. boxlist = box_list_ops.prune_small_boxes(boxlist, 0) return boxlist.get() def transform_input_data(tensor_dict, model_preprocess_fn, image_resizer_fn, num_classes, data_augmentation_fn=None, merge_multiple_boxes=False, retain_original_image=False, use_multiclass_scores=False, use_bfloat16=False, retain_original_image_additional_channels=False, keypoint_type_weight=None, image_classes_field_map_empty_to_ones=True): """A single function that is responsible for all input data transformations. Data transformation functions are applied in the following order. 1. If key fields.InputDataFields.image_additional_channels is present in tensor_dict, the additional channels will be merged into fields.InputDataFields.image. 2. data_augmentation_fn (optional): applied on tensor_dict. 3. model_preprocess_fn: applied only on image tensor in tensor_dict. 4. keypoint_type_weight (optional): If groundtruth keypoints are in the tensor dictionary, per-keypoint weights are produced. These weights are initialized by `keypoint_type_weight` (or ones if left None). Then, for all keypoints that are not visible, the weights are set to 0 (to avoid penalizing the model in a loss function). 5. image_resizer_fn: applied on original image and instance mask tensor in tensor_dict. 6. one_hot_encoding: applied to classes tensor in tensor_dict. 7. merge_multiple_boxes (optional): when groundtruth boxes are exactly the same they can be merged into a single box with an associated k-hot class label. Args: tensor_dict: dictionary containing input tensors keyed by fields.InputDataFields. model_preprocess_fn: model's preprocess function to apply on image tensor. This function must take in a 4-D float tensor and return a 4-D preprocess float tensor and a tensor containing the true image shape. 
image_resizer_fn: image resizer function to apply on groundtruth instance `masks. This function must take a 3-D float tensor of an image and a 3-D tensor of instance masks and return a resized version of these along with the true shapes. num_classes: number of max classes to one-hot (or k-hot) encode the class labels. data_augmentation_fn: (optional) data augmentation function to apply on input `tensor_dict`. merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes and classes for a given image if the boxes are exactly the same. retain_original_image: (optional) whether to retain original image in the output dictionary. use_multiclass_scores: whether to use multiclass scores as class targets instead of one-hot encoding of `groundtruth_classes`. When this is True and multiclass_scores is empty, one-hot encoding of `groundtruth_classes` is used as a fallback. use_bfloat16: (optional) a bool, whether to use bfloat16 in training. retain_original_image_additional_channels: (optional) Whether to retain original image additional channels in the output dictionary. keypoint_type_weight: A list (of length num_keypoints) containing groundtruth loss weights to use for each keypoint. If None, will use a weight of 1. image_classes_field_map_empty_to_ones: A boolean flag indicating if empty image classes field indicates that all classes have been labeled on this image [true] or none [false]. Returns: A dictionary keyed by fields.InputDataFields containing the tensors obtained after applying all the transformations. Raises: KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes are provided by the decoder in tensor_dict since both fields are considered to contain the same information. """ out_tensor_dict = tensor_dict.copy() input_fields = fields.InputDataFields labeled_classes_field = input_fields.groundtruth_labeled_classes image_classes_field = input_fields.groundtruth_image_classes verified_neg_classes_field = input_fields.groundtruth_verified_neg_classes not_exhaustive_field = input_fields.groundtruth_not_exhaustive_classes if (labeled_classes_field in out_tensor_dict and image_classes_field in out_tensor_dict): raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes' 'are provided by the decoder, but only one should be set.') for field, map_empty_to_ones in [(labeled_classes_field, True), (image_classes_field, image_classes_field_map_empty_to_ones), (verified_neg_classes_field, False), (not_exhaustive_field, False)]: if field in out_tensor_dict: out_tensor_dict[field] = _remove_unrecognized_classes( out_tensor_dict[field], unrecognized_label=-1) out_tensor_dict[field] = convert_labeled_classes_to_k_hot( out_tensor_dict[field], num_classes, map_empty_to_ones) if input_fields.multiclass_scores in out_tensor_dict: out_tensor_dict[ input_fields .multiclass_scores] = _multiclass_scores_or_one_hot_labels( out_tensor_dict[input_fields.multiclass_scores], out_tensor_dict[input_fields.groundtruth_boxes], out_tensor_dict[input_fields.groundtruth_classes], num_classes) if input_fields.groundtruth_boxes in out_tensor_dict: out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates( out_tensor_dict) out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict) if retain_original_image: out_tensor_dict[input_fields.original_image] = tf.cast( image_resizer_fn(out_tensor_dict[input_fields.image], None)[0], tf.uint8) if input_fields.image_additional_channels in out_tensor_dict: channels = 
out_tensor_dict[input_fields.image_additional_channels] out_tensor_dict[input_fields.image] = tf.concat( [out_tensor_dict[input_fields.image], channels], axis=2) if retain_original_image_additional_channels: out_tensor_dict[ input_fields.image_additional_channels] = tf.cast( image_resizer_fn(channels, None)[0], tf.uint8) # Apply data augmentation ops. if data_augmentation_fn is not None: out_tensor_dict = data_augmentation_fn(out_tensor_dict) # Apply model preprocessing ops and resize instance masks. image = out_tensor_dict[input_fields.image] preprocessed_resized_image, true_image_shape = model_preprocess_fn( tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0)) preprocessed_shape = tf.shape(preprocessed_resized_image) new_height, new_width = preprocessed_shape[1], preprocessed_shape[2] im_box = tf.stack([ 0.0, 0.0, tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]), tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1]) ]) if input_fields.groundtruth_boxes in tensor_dict: bboxes = out_tensor_dict[input_fields.groundtruth_boxes] boxlist = box_list.BoxList(bboxes) realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box) realigned_boxes_tensor = realigned_bboxes.get() valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor) out_tensor_dict[ input_fields.groundtruth_boxes] = valid_boxes_tensor if input_fields.groundtruth_keypoints in tensor_dict: keypoints = out_tensor_dict[input_fields.groundtruth_keypoints] realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) out_tensor_dict[ input_fields.groundtruth_keypoints] = realigned_keypoints flds_gt_kpt = input_fields.groundtruth_keypoints flds_gt_kpt_vis = input_fields.groundtruth_keypoint_visibilities flds_gt_kpt_weights = input_fields.groundtruth_keypoint_weights if flds_gt_kpt_vis not in out_tensor_dict: out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like( out_tensor_dict[flds_gt_kpt][:, :, 0], dtype=tf.bool) flds_gt_kpt_depth = fields.InputDataFields.groundtruth_keypoint_depths flds_gt_kpt_depth_weight = ( fields.InputDataFields.groundtruth_keypoint_depth_weights) if flds_gt_kpt_depth in out_tensor_dict: out_tensor_dict[flds_gt_kpt_depth] = out_tensor_dict[flds_gt_kpt_depth] out_tensor_dict[flds_gt_kpt_depth_weight] = out_tensor_dict[ flds_gt_kpt_depth_weight] out_tensor_dict[flds_gt_kpt_weights] = ( keypoint_ops.keypoint_weights_from_visibilities( out_tensor_dict[flds_gt_kpt_vis], keypoint_type_weight)) dp_surface_coords_fld = input_fields.groundtruth_dp_surface_coords if dp_surface_coords_fld in tensor_dict: dp_surface_coords = out_tensor_dict[dp_surface_coords_fld] realigned_dp_surface_coords = densepose_ops.change_coordinate_frame( dp_surface_coords, im_box) out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords if use_bfloat16: preprocessed_resized_image = tf.cast( preprocessed_resized_image, tf.bfloat16) if input_fields.context_features in out_tensor_dict: out_tensor_dict[input_fields.context_features] = tf.cast( out_tensor_dict[input_fields.context_features], tf.bfloat16) out_tensor_dict[input_fields.image] = tf.squeeze( preprocessed_resized_image, axis=0) out_tensor_dict[input_fields.true_image_shape] = tf.squeeze( true_image_shape, axis=0) if input_fields.groundtruth_instance_masks in out_tensor_dict: masks = out_tensor_dict[input_fields.groundtruth_instance_masks] _, resized_masks, _ = image_resizer_fn(image, masks) if use_bfloat16: resized_masks = tf.cast(resized_masks, tf.bfloat16) out_tensor_dict[ input_fields.groundtruth_instance_masks] = 
resized_masks zero_indexed_groundtruth_classes = out_tensor_dict[ input_fields.groundtruth_classes] - _LABEL_OFFSET if use_multiclass_scores: out_tensor_dict[ input_fields.groundtruth_classes] = out_tensor_dict[ input_fields.multiclass_scores] else: out_tensor_dict[input_fields.groundtruth_classes] = tf.one_hot( zero_indexed_groundtruth_classes, num_classes) out_tensor_dict.pop(input_fields.multiclass_scores, None) if input_fields.groundtruth_confidences in out_tensor_dict: groundtruth_confidences = out_tensor_dict[ input_fields.groundtruth_confidences] # Map the confidences to the one-hot encoding of classes out_tensor_dict[input_fields.groundtruth_confidences] = ( tf.reshape(groundtruth_confidences, [-1, 1]) * out_tensor_dict[input_fields.groundtruth_classes]) else: groundtruth_confidences = tf.ones_like( zero_indexed_groundtruth_classes, dtype=tf.float32) out_tensor_dict[input_fields.groundtruth_confidences] = ( out_tensor_dict[input_fields.groundtruth_classes]) if merge_multiple_boxes: merged_boxes, merged_classes, merged_confidences, _ = ( util_ops.merge_boxes_with_multiple_labels( out_tensor_dict[input_fields.groundtruth_boxes], zero_indexed_groundtruth_classes, groundtruth_confidences, num_classes)) merged_classes = tf.cast(merged_classes, tf.float32) out_tensor_dict[input_fields.groundtruth_boxes] = merged_boxes out_tensor_dict[input_fields.groundtruth_classes] = merged_classes out_tensor_dict[input_fields.groundtruth_confidences] = ( merged_confidences) if input_fields.groundtruth_boxes in out_tensor_dict: out_tensor_dict[input_fields.num_groundtruth_boxes] = tf.shape( out_tensor_dict[input_fields.groundtruth_boxes])[0] return out_tensor_dict def pad_input_data_to_static_shapes(tensor_dict, max_num_boxes, num_classes, spatial_image_shape=None, max_num_context_features=None, context_feature_length=None, max_dp_points=336): """Pads input tensors to static shapes. In case num_additional_channels > 0, we assume that the additional channels have already been concatenated to the base image. Args: tensor_dict: Tensor dictionary of input data max_num_boxes: Max number of groundtruth boxes needed to compute shapes for padding. num_classes: Number of classes in the dataset needed to compute shapes for padding. spatial_image_shape: A list of two integers of the form [height, width] containing expected spatial shape of the image. max_num_context_features (optional): The maximum number of context features needed to compute shapes padding. context_feature_length (optional): The length of the context feature. max_dp_points (optional): The maximum number of DensePose sampled points per instance. The default (336) is selected since the original DensePose paper (https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum number of samples per part is 14, and therefore 24 * 14 = 336 is the maximum sampler per instance. Returns: A dictionary keyed by fields.InputDataFields containing padding shapes for tensors in the dataset. Raises: ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we detect that additional channels have not been concatenated yet, or if max_num_context_features is not specified and context_features is in the tensor dict. 
""" if not spatial_image_shape or spatial_image_shape == [-1, -1]: height, width = None, None else: height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence input_fields = fields.InputDataFields num_additional_channels = 0 if input_fields.image_additional_channels in tensor_dict: num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[ input_fields.image_additional_channels].shape[2]) # We assume that if num_additional_channels > 0, then it has already been # concatenated to the base image (but not the ground truth). num_channels = 3 if input_fields.image in tensor_dict: num_channels = shape_utils.get_dim_as_int( tensor_dict[input_fields.image].shape[2]) if num_additional_channels: if num_additional_channels >= num_channels: raise ValueError( 'Image must be already concatenated with additional channels.') if (input_fields.original_image in tensor_dict and shape_utils.get_dim_as_int( tensor_dict[input_fields.original_image].shape[2]) == num_channels): raise ValueError( 'Image must be already concatenated with additional channels.') if input_fields.context_features in tensor_dict and ( max_num_context_features is None): raise ValueError('max_num_context_features must be specified in the model ' 'config if include_context is specified in the input ' 'config') padding_shapes = { input_fields.image: [height, width, num_channels], input_fields.original_image_spatial_shape: [2], input_fields.image_additional_channels: [ height, width, num_additional_channels ], input_fields.source_id: [], input_fields.filename: [], input_fields.key: [], input_fields.groundtruth_difficult: [max_num_boxes], input_fields.groundtruth_boxes: [max_num_boxes, 4], input_fields.groundtruth_classes: [max_num_boxes, num_classes], input_fields.groundtruth_instance_masks: [ max_num_boxes, height, width ], input_fields.groundtruth_instance_mask_weights: [max_num_boxes], input_fields.groundtruth_is_crowd: [max_num_boxes], input_fields.groundtruth_group_of: [max_num_boxes], input_fields.groundtruth_area: [max_num_boxes], input_fields.groundtruth_weights: [max_num_boxes], input_fields.groundtruth_confidences: [ max_num_boxes, num_classes ], input_fields.num_groundtruth_boxes: [], input_fields.groundtruth_label_types: [max_num_boxes], input_fields.groundtruth_label_weights: [max_num_boxes], input_fields.true_image_shape: [3], input_fields.groundtruth_image_classes: [num_classes], input_fields.groundtruth_image_confidences: [num_classes], input_fields.groundtruth_labeled_classes: [num_classes], } if input_fields.original_image in tensor_dict: padding_shapes[input_fields.original_image] = [ height, width, shape_utils.get_dim_as_int(tensor_dict[input_fields. original_image].shape[2]) ] if input_fields.groundtruth_keypoints in tensor_dict: tensor_shape = ( tensor_dict[input_fields.groundtruth_keypoints].shape) padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1]), shape_utils.get_dim_as_int(tensor_shape[2])] padding_shapes[input_fields.groundtruth_keypoints] = padding_shape if input_fields.groundtruth_keypoint_visibilities in tensor_dict: tensor_shape = tensor_dict[input_fields. groundtruth_keypoint_visibilities].shape padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] padding_shapes[input_fields. groundtruth_keypoint_visibilities] = padding_shape if fields.InputDataFields.groundtruth_keypoint_depths in tensor_dict: tensor_shape = tensor_dict[fields.InputDataFields. 
groundtruth_keypoint_depths].shape padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] padding_shapes[fields.InputDataFields. groundtruth_keypoint_depths] = padding_shape padding_shapes[fields.InputDataFields. groundtruth_keypoint_depth_weights] = padding_shape if input_fields.groundtruth_keypoint_weights in tensor_dict: tensor_shape = ( tensor_dict[input_fields.groundtruth_keypoint_weights].shape) padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] padding_shapes[input_fields. groundtruth_keypoint_weights] = padding_shape if input_fields.groundtruth_dp_num_points in tensor_dict: padding_shapes[ input_fields.groundtruth_dp_num_points] = [max_num_boxes] padding_shapes[ input_fields.groundtruth_dp_part_ids] = [ max_num_boxes, max_dp_points] padding_shapes[ input_fields.groundtruth_dp_surface_coords] = [ max_num_boxes, max_dp_points, 4] if input_fields.groundtruth_track_ids in tensor_dict: padding_shapes[ input_fields.groundtruth_track_ids] = [max_num_boxes] if input_fields.groundtruth_verified_neg_classes in tensor_dict: padding_shapes[ input_fields.groundtruth_verified_neg_classes] = [num_classes] if input_fields.groundtruth_not_exhaustive_classes in tensor_dict: padding_shapes[ input_fields.groundtruth_not_exhaustive_classes] = [num_classes] # Prepare for ContextRCNN related fields. if input_fields.context_features in tensor_dict: padding_shape = [max_num_context_features, context_feature_length] padding_shapes[input_fields.context_features] = padding_shape tensor_shape = tf.shape( tensor_dict[fields.InputDataFields.context_features]) tensor_dict[fields.InputDataFields.valid_context_size] = tensor_shape[0] padding_shapes[fields.InputDataFields.valid_context_size] = [] if fields.InputDataFields.context_feature_length in tensor_dict: padding_shapes[fields.InputDataFields.context_feature_length] = [] if fields.InputDataFields.context_features_image_id_list in tensor_dict: padding_shapes[fields.InputDataFields.context_features_image_id_list] = [ max_num_context_features] if input_fields.is_annotated in tensor_dict: padding_shapes[input_fields.is_annotated] = [] padded_tensor_dict = {} for tensor_name in tensor_dict: padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd( tensor_dict[tensor_name], padding_shapes[tensor_name]) # Make sure that the number of groundtruth boxes now reflects the # padded/clipped tensors. if input_fields.num_groundtruth_boxes in padded_tensor_dict: padded_tensor_dict[input_fields.num_groundtruth_boxes] = ( tf.minimum( padded_tensor_dict[input_fields.num_groundtruth_boxes], max_num_boxes)) return padded_tensor_dict def augment_input_data(tensor_dict, data_augmentation_options): """Applies data augmentation ops to input tensors. Args: tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields. data_augmentation_options: A list of tuples, where each tuple contains a function and a dictionary that contains arguments and their values. Usually, this is the output of core/preprocessor.build. Returns: A dictionary of tensors obtained by applying data augmentation ops to the input tensor dictionary. 
""" tensor_dict[fields.InputDataFields.image] = tf.expand_dims( tf.cast(tensor_dict[fields.InputDataFields.image], dtype=tf.float32), 0) include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks in tensor_dict) include_instance_mask_weights = ( fields.InputDataFields.groundtruth_instance_mask_weights in tensor_dict) include_keypoints = (fields.InputDataFields.groundtruth_keypoints in tensor_dict) include_keypoint_visibilities = ( fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict) include_keypoint_depths = ( fields.InputDataFields.groundtruth_keypoint_depths in tensor_dict) include_label_weights = (fields.InputDataFields.groundtruth_weights in tensor_dict) include_label_confidences = (fields.InputDataFields.groundtruth_confidences in tensor_dict) include_multiclass_scores = (fields.InputDataFields.multiclass_scores in tensor_dict) dense_pose_fields = [fields.InputDataFields.groundtruth_dp_num_points, fields.InputDataFields.groundtruth_dp_part_ids, fields.InputDataFields.groundtruth_dp_surface_coords] include_dense_pose = all(field in tensor_dict for field in dense_pose_fields) tensor_dict = preprocessor.preprocess( tensor_dict, data_augmentation_options, func_arg_map=preprocessor.get_default_func_arg_map( include_label_weights=include_label_weights, include_label_confidences=include_label_confidences, include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_instance_mask_weights=include_instance_mask_weights, include_keypoints=include_keypoints, include_keypoint_visibilities=include_keypoint_visibilities, include_dense_pose=include_dense_pose, include_keypoint_depths=include_keypoint_depths)) tensor_dict[fields.InputDataFields.image] = tf.squeeze( tensor_dict[fields.InputDataFields.image], axis=0) return tensor_dict def _get_labels_dict(input_dict): """Extracts labels dict from input dict.""" required_label_keys = [ fields.InputDataFields.num_groundtruth_boxes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_weights, ] labels_dict = {} for key in required_label_keys: labels_dict[key] = input_dict[key] optional_label_keys = [ fields.InputDataFields.groundtruth_confidences, fields.InputDataFields.groundtruth_labeled_classes, fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_keypoint_depths, fields.InputDataFields.groundtruth_keypoint_depth_weights, fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_instance_mask_weights, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_group_of, fields.InputDataFields.groundtruth_difficult, fields.InputDataFields.groundtruth_keypoint_visibilities, fields.InputDataFields.groundtruth_keypoint_weights, fields.InputDataFields.groundtruth_dp_num_points, fields.InputDataFields.groundtruth_dp_part_ids, fields.InputDataFields.groundtruth_dp_surface_coords, fields.InputDataFields.groundtruth_track_ids, fields.InputDataFields.groundtruth_verified_neg_classes, fields.InputDataFields.groundtruth_not_exhaustive_classes, fields.InputDataFields.groundtruth_image_classes, ] for key in optional_label_keys: if key in input_dict: labels_dict[key] = input_dict[key] if fields.InputDataFields.groundtruth_difficult in labels_dict: labels_dict[fields.InputDataFields.groundtruth_difficult] = tf.cast( labels_dict[fields.InputDataFields.groundtruth_difficult], tf.int32) return labels_dict 
def _replace_empty_string_with_random_number(string_tensor): """Returns string unchanged if non-empty, and random string tensor otherwise. The random string is an integer between 0 and 2**63 - 1, cast as a string. Args: string_tensor: A tf.tensor of dtype string. Returns: out_string: A tf.tensor of dtype string. If string_tensor contains the empty string, out_string will contain a random integer cast to a string. Otherwise string_tensor is returned unchanged. """ empty_string = tf.constant('', dtype=tf.string, name='EmptyString') random_source_id = tf.as_string( tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64)) out_string = tf.cond( tf.equal(string_tensor, empty_string), true_fn=lambda: random_source_id, false_fn=lambda: string_tensor) return out_string def _get_features_dict(input_dict, include_source_id=False): """Extracts features dict from input dict.""" source_id = _replace_empty_string_with_random_number( input_dict[fields.InputDataFields.source_id]) hash_from_source_id = tf.string_to_hash_bucket_fast(source_id, HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: tf.cast(hash_from_source_id, tf.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape], fields.InputDataFields.original_image_spatial_shape: input_dict[fields.InputDataFields.original_image_spatial_shape] } if include_source_id: features[fields.InputDataFields.source_id] = source_id if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] if fields.InputDataFields.image_additional_channels in input_dict: features[fields.InputDataFields.image_additional_channels] = input_dict[ fields.InputDataFields.image_additional_channels] if fields.InputDataFields.context_features in input_dict: features[fields.InputDataFields.context_features] = input_dict[ fields.InputDataFields.context_features] if fields.InputDataFields.valid_context_size in input_dict: features[fields.InputDataFields.valid_context_size] = input_dict[ fields.InputDataFields.valid_context_size] if fields.InputDataFields.context_features_image_id_list in input_dict: features[fields.InputDataFields.context_features_image_id_list] = ( input_dict[fields.InputDataFields.context_features_image_id_list]) return features def create_train_input_fn(train_config, train_input_config, model_config): """Creates a train `input` function for `Estimator`. Args: train_config: A train_pb2.TrainConfig. train_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. Returns: `input_fn` for `Estimator` in TRAIN mode. """ def _train_input_fn(params=None): return train_input(train_config, train_input_config, model_config, params=params) return _train_input_fn def train_input(train_config, train_input_config, model_config, model=None, params=None, input_context=None): """Returns `features` and `labels` tensor dictionaries for training. Args: train_config: A train_pb2.TrainConfig. train_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. model: A pre-constructed Detection Model. If None, one will be created from the config. params: Parameter dictionary passed from the estimator. input_context: optional, A tf.distribute.InputContext object used to shard filenames and compute per-replica batch_size when this function is being called per-replica. Returns: A tf.data.Dataset that holds (features, labels) tuple.
features: Dictionary of feature tensors. features[fields.InputDataFields.image] is a [batch_size, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [batch_size] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. features[fields.InputDataFields.original_image] (optional) is a [batch_size, H, W, C] float32 tensor with original images. labels: Dictionary of groundtruth tensors. labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] int32 tensor indicating the number of groundtruth boxes. labels[fields.InputDataFields.groundtruth_boxes] is a [batch_size, num_boxes, 4] float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a [batch_size, num_boxes, num_classes] float32 one-hot tensor of classes. labels[fields.InputDataFields.groundtruth_weights] is a [batch_size, num_boxes] float32 tensor containing groundtruth weights for the boxes. -- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a [batch_size, num_boxes, H, W] float32 tensor containing only binary values, which represent instance masks for objects. labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a [batch_size, num_boxes] float32 tensor containing groundtruth weights for each instance mask. labels[fields.InputDataFields.groundtruth_keypoints] is a [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing keypoints for each box. labels[fields.InputDataFields.groundtruth_weights] is a [batch_size, num_boxes, num_keypoints] float32 tensor containing groundtruth weights for the keypoints. labels[fields.InputDataFields.groundtruth_visibilities] is a [batch_size, num_boxes, num_keypoints] bool tensor containing groundtruth visibilities for each keypoint. labels[fields.InputDataFields.groundtruth_labeled_classes] is a [batch_size, num_classes] float32 k-hot tensor of classes. labels[fields.InputDataFields.groundtruth_dp_num_points] is a [batch_size, num_boxes] int32 tensor with the number of sampled DensePose points per object. labels[fields.InputDataFields.groundtruth_dp_part_ids] is a [batch_size, num_boxes, max_sampled_points] int32 tensor with the DensePose part ids (0-indexed) per object. labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the DensePose surface coordinates. The format is (y, x, v, u), where (y, x) are normalized image coordinates and (v, u) are normalized surface part coordinates. labels[fields.InputDataFields.groundtruth_track_ids] is a [batch_size, num_boxes] int32 tensor with the track ID for each object. Raises: TypeError: if the `train_config`, `train_input_config` or `model_config` are not of the correct type. 
""" if not isinstance(train_config, train_pb2.TrainConfig): raise TypeError('For training mode, the `train_config` must be a ' 'train_pb2.TrainConfig.') if not isinstance(train_input_config, input_reader_pb2.InputReader): raise TypeError('The `train_input_config` must be a ' 'input_reader_pb2.InputReader.') if not isinstance(model_config, model_pb2.DetectionModel): raise TypeError('The `model_config` must be a ' 'model_pb2.DetectionModel.') if model is None: model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=True).preprocess else: model_preprocess_fn = model.preprocess num_classes = config_util.get_number_of_classes(model_config) def transform_and_pad_input_data_fn(tensor_dict): """Combines transform and pad operation.""" data_augmentation_options = [ preprocessor_builder.build(step) for step in train_config.data_augmentation_options ] data_augmentation_fn = functools.partial( augment_input_data, data_augmentation_options=data_augmentation_options) image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) keypoint_type_weight = train_input_config.keypoint_type_weight or None transform_data_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=data_augmentation_fn, merge_multiple_boxes=train_config.merge_multiple_label_boxes, retain_original_image=train_config.retain_original_images, use_multiclass_scores=train_config.use_multiclass_scores, use_bfloat16=train_config.use_bfloat16, keypoint_type_weight=keypoint_type_weight) tensor_dict = pad_input_data_to_static_shapes( tensor_dict=transform_data_fn(tensor_dict), max_num_boxes=train_input_config.max_number_of_boxes, num_classes=num_classes, spatial_image_shape=config_util.get_spatial_image_size( image_resizer_config), max_num_context_features=config_util.get_max_num_context_features( model_config), context_feature_length=config_util.get_context_feature_length( model_config)) include_source_id = train_input_config.include_source_id return (_get_features_dict(tensor_dict, include_source_id), _get_labels_dict(tensor_dict)) reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True) dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( train_input_config, transform_input_data_fn=transform_and_pad_input_data_fn, batch_size=params['batch_size'] if params else train_config.batch_size, input_context=input_context, reduce_to_frame_fn=reduce_to_frame_fn) return dataset def create_eval_input_fn(eval_config, eval_input_config, model_config): """Creates an eval `input` function for `Estimator`. Args: eval_config: An eval_pb2.EvalConfig. eval_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. Returns: `input_fn` for `Estimator` in EVAL mode. """ def _eval_input_fn(params=None): return eval_input(eval_config, eval_input_config, model_config, params=params) return _eval_input_fn def eval_input(eval_config, eval_input_config, model_config, model=None, params=None, input_context=None): """Returns `features` and `labels` tensor dictionaries for evaluation. Args: eval_config: An eval_pb2.EvalConfig. eval_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. model: A pre-constructed Detection Model. If None, one will be created from the config. params: Parameter dictionary passed from the estimator. 
input_context: optional, A tf.distribute.InputContext object used to shard filenames and compute per-replica batch_size when this function is being called per-replica. Returns: A tf.data.Dataset that holds (features, labels) tuple. features: Dictionary of feature tensors. features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [1] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [1, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. features[fields.InputDataFields.original_image] is a [1, H', W', C] float32 tensor with the original image. labels: Dictionary of groundtruth tensors. labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4] float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a [num_boxes, num_classes] float32 one-hot tensor of classes. labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes] float32 tensor containing object areas. labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes] bool tensor indicating if the boxes enclose a crowd. labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes] int32 tensor indicating if the boxes represent difficult instances. -- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a [1, num_boxes, H, W] float32 tensor containing only binary values, which represent instance masks for objects. labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a [1, num_boxes] float32 tensor containing groundtruth weights for each instance mask. labels[fields.InputDataFields.groundtruth_weights] is a [batch_size, num_boxes, num_keypoints] float32 tensor containing groundtruth weights for the keypoints. labels[fields.InputDataFields.groundtruth_visibilities] is a [batch_size, num_boxes, num_keypoints] bool tensor containing groundtruth visibilities for each keypoint. labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes] bool tensor indicating if the box covers more than 5 instances of the same class which heavily occlude each other. labels[fields.InputDataFields.groundtruth_labeled_classes] is a [num_boxes, num_classes] float32 k-hot tensor of classes. labels[fields.InputDataFields.groundtruth_dp_num_points] is a [batch_size, num_boxes] int32 tensor with the number of sampled DensePose points per object. labels[fields.InputDataFields.groundtruth_dp_part_ids] is a [batch_size, num_boxes, max_sampled_points] int32 tensor with the DensePose part ids (0-indexed) per object. labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the DensePose surface coordinates. The format is (y, x, v, u), where (y, x) are normalized image coordinates and (v, u) are normalized surface part coordinates. labels[fields.InputDataFields.groundtruth_track_ids] is a [batch_size, num_boxes] int32 tensor with the track ID for each object. Raises: TypeError: if the `eval_config`, `eval_input_config` or `model_config` are not of the correct type. 
""" params = params or {} if not isinstance(eval_config, eval_pb2.EvalConfig): raise TypeError('For eval mode, the `eval_config` must be a ' 'train_pb2.EvalConfig.') if not isinstance(eval_input_config, input_reader_pb2.InputReader): raise TypeError('The `eval_input_config` must be a ' 'input_reader_pb2.InputReader.') if not isinstance(model_config, model_pb2.DetectionModel): raise TypeError('The `model_config` must be a ' 'model_pb2.DetectionModel.') if eval_config.force_no_resize: arch = model_config.WhichOneof('model') arch_config = getattr(model_config, arch) image_resizer_proto = image_resizer_pb2.ImageResizer() image_resizer_proto.identity_resizer.CopyFrom( image_resizer_pb2.IdentityResizer()) arch_config.image_resizer.CopyFrom(image_resizer_proto) if model is None: model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=False).preprocess else: model_preprocess_fn = model.preprocess def transform_and_pad_input_data_fn(tensor_dict): """Combines transform and pad operation.""" num_classes = config_util.get_number_of_classes(model_config) image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) keypoint_type_weight = eval_input_config.keypoint_type_weight or None transform_data_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=None, retain_original_image=eval_config.retain_original_images, retain_original_image_additional_channels= eval_config.retain_original_image_additional_channels, keypoint_type_weight=keypoint_type_weight, image_classes_field_map_empty_to_ones=eval_config .image_classes_field_map_empty_to_ones) tensor_dict = pad_input_data_to_static_shapes( tensor_dict=transform_data_fn(tensor_dict), max_num_boxes=eval_input_config.max_number_of_boxes, num_classes=config_util.get_number_of_classes(model_config), spatial_image_shape=config_util.get_spatial_image_size( image_resizer_config), max_num_context_features=config_util.get_max_num_context_features( model_config), context_feature_length=config_util.get_context_feature_length( model_config)) include_source_id = eval_input_config.include_source_id return (_get_features_dict(tensor_dict, include_source_id), _get_labels_dict(tensor_dict)) reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False) dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( eval_input_config, batch_size=params['batch_size'] if params else eval_config.batch_size, transform_input_data_fn=transform_and_pad_input_data_fn, input_context=input_context, reduce_to_frame_fn=reduce_to_frame_fn) return dataset def create_predict_input_fn(model_config, predict_input_config): """Creates a predict `input` function for `Estimator`. Args: model_config: A model_pb2.DetectionModel. predict_input_config: An input_reader_pb2.InputReader. Returns: `input_fn` for `Estimator` in PREDICT mode. """ def _predict_input_fn(params=None): """Decodes serialized tf.Examples and returns `ServingInputReceiver`. Args: params: Parameter dictionary passed from the estimator. Returns: `ServingInputReceiver`. 
""" del params example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example') num_classes = config_util.get_number_of_classes(model_config) model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=False).preprocess image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) transform_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=None) decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=False, num_additional_channels=predict_input_config.num_additional_channels) input_dict = transform_fn(decoder.decode(example)) images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32) images = tf.expand_dims(images, axis=0) true_image_shape = tf.expand_dims( input_dict[fields.InputDataFields.true_image_shape], axis=0) return tf_estimator.export.ServingInputReceiver( features={ fields.InputDataFields.image: images, fields.InputDataFields.true_image_shape: true_image_shape}, receiver_tensors={SERVING_FED_EXAMPLE_KEY: example}) return _predict_input_fn def get_reduce_to_frame_fn(input_reader_config, is_training): """Returns a function reducing sequence tensors to single frame tensors. If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through this function unchanged. Otherwise, when in training mode, a single frame is selected at random from the sequence example, and the tensors for that frame are converted to single frame tensors, with all associated context features. In evaluation mode all frames are converted to single frame tensors with copied context tensors. After the sequence example tensors are converted into one or many single frame tensors, the images from each frame are decoded. Args: input_reader_config: An input_reader_pb2.InputReader. is_training: Whether we are in training mode. Returns: `reduce_to_frame_fn` for the dataset builder """ if input_reader_config.input_type != ( input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')): return lambda dataset, dataset_map_fn, batch_size, config: dataset else: def reduce_to_frame(dataset, dataset_map_fn, batch_size, input_reader_config): """Returns a function reducing sequence tensors to single frame tensors. Args: dataset: A tf dataset containing sequence tensors. dataset_map_fn: A function that handles whether to map_with_legacy_function for this dataset batch_size: used if map_with_legacy_function is true to determine num_parallel_calls input_reader_config: used if map_with_legacy_function is true to determine num_parallel_calls Returns: A tf dataset containing single frame tensors. """ if is_training: def get_single_frame(tensor_dict): """Returns a random frame from a sequence. Picks a random frame and returns slices of sequence tensors corresponding to the random frame. Returns non-sequence tensors unchanged. Args: tensor_dict: A dictionary containing sequence tensors. Returns: Tensors for a single random frame within the sequence. 
""" num_frames = tf.cast( tf.shape(tensor_dict[fields.InputDataFields.source_id])[0], dtype=tf.int32) if input_reader_config.frame_index == -1: frame_index = tf.random.uniform((), minval=0, maxval=num_frames, dtype=tf.int32) else: frame_index = tf.constant(input_reader_config.frame_index, dtype=tf.int32) out_tensor_dict = {} for key in tensor_dict: if key in fields.SEQUENCE_FIELDS: # Slice random frame from sequence tensors out_tensor_dict[key] = tensor_dict[key][frame_index] else: # Copy all context tensors. out_tensor_dict[key] = tensor_dict[key] return out_tensor_dict dataset = dataset_map_fn(dataset, get_single_frame, batch_size, input_reader_config) else: dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors, batch_size, input_reader_config) dataset = dataset.unbatch() # Decode frame here as SequenceExample tensors contain encoded images. dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size, input_reader_config) return dataset return reduce_to_frame
54,732
44.047737
81
py
models
models-master/research/object_detection/__init__.py
0
0
0
py
models
models-master/research/object_detection/model_lib.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Constructs model, inputs, and training environment.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools import os import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator import tensorflow.compat.v2 as tf2 import tf_slim as slim from object_detection import eval_util from object_detection import exporter as exporter_lib from object_detection import inputs from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.builders import optimizer_builder from object_detection.core import standard_fields as fields from object_detection.utils import config_util from object_detection.utils import label_map_util from object_detection.utils import ops from object_detection.utils import shape_utils from object_detection.utils import variables_helper from object_detection.utils import visualization_utils as vis_utils # pylint: disable=g-import-not-at-top try: from tensorflow.contrib import learn as contrib_learn except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top # A map of names to methods that help build the model. MODEL_BUILD_UTIL_MAP = { 'get_configs_from_pipeline_file': config_util.get_configs_from_pipeline_file, 'create_pipeline_proto_from_configs': config_util.create_pipeline_proto_from_configs, 'merge_external_params_with_configs': config_util.merge_external_params_with_configs, 'create_train_input_fn': inputs.create_train_input_fn, 'create_eval_input_fn': inputs.create_eval_input_fn, 'create_predict_input_fn': inputs.create_predict_input_fn, 'detection_model_fn_base': model_builder.build, } def _prepare_groundtruth_for_eval(detection_model, class_agnostic, max_number_of_boxes): """Extracts groundtruth data from detection_model and prepares it for eval. Args: detection_model: A `DetectionModel` object. class_agnostic: Whether the detections are class_agnostic. max_number_of_boxes: Max number of groundtruth boxes. Returns: A tuple of: groundtruth: Dictionary with the following fields: 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in normalized coordinates. 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed classes. 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in groundtruth) 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating is_crowd annotations (if provided in groundtruth). 'groundtruth_area': [batch_size, num_boxes] float32 tensor indicating the area (in the original absolute coordinates) of annotations (if provided in groundtruth). 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number of groundtruth boxes per image.. 
'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32 tensor of keypoints (if provided in groundtruth). 'groundtruth_dp_num_points_list': [batch_size, num_boxes] int32 tensor with the number of DensePose points for each instance (if provided in groundtruth). 'groundtruth_dp_part_ids_list': [batch_size, num_boxes, max_sampled_points] int32 tensor with the part ids for each DensePose sampled point (if provided in groundtruth). 'groundtruth_dp_surface_coords_list': [batch_size, num_boxes, max_sampled_points, 4] containing the DensePose surface coordinates for each sampled point (if provided in groundtruth). 'groundtruth_track_ids_list': [batch_size, num_boxes] int32 tensor with track ID for each instance (if provided in groundtruth). 'groundtruth_group_of': [batch_size, num_boxes] bool tensor indicating group_of annotations (if provided in groundtruth). 'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor of 1-indexed classes. 'groundtruth_verified_neg_classes': [batch_size, num_classes] float32 K-hot representation of 1-indexed classes which were verified as not present in the image. 'groundtruth_not_exhaustive_classes': [batch_size, num_classes] K-hot representation of 1-indexed classes which don't have all of their instances marked exhaustively. 'input_data_fields.groundtruth_image_classes': integer representation of the classes that were sent for verification for a given image. Note that this field does not support batching as the number of classes can be variable. class_agnostic: Boolean indicating whether detections are class agnostic. """ input_data_fields = fields.InputDataFields() groundtruth_boxes = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.boxes)) groundtruth_boxes_shape = tf.shape(groundtruth_boxes) # For class-agnostic models, groundtruth one-hot encodings collapse to all # ones. if class_agnostic: groundtruth_classes_one_hot = tf.ones( [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1]) else: groundtruth_classes_one_hot = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.classes)) label_id_offset = 1 # Applying label id offset (b/63711816) groundtruth_classes = ( tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset) groundtruth = { input_data_fields.groundtruth_boxes: groundtruth_boxes, input_data_fields.groundtruth_classes: groundtruth_classes } if detection_model.groundtruth_has_field( input_data_fields.groundtruth_image_classes): groundtruth_image_classes_k_hot = tf.stack( detection_model.groundtruth_lists( input_data_fields.groundtruth_image_classes)) groundtruth_image_classes = tf.expand_dims( tf.where(groundtruth_image_classes_k_hot > 0)[:, 1], 0) # Adds back label_id_offset as it is subtracted in # convert_labeled_classes_to_k_hot. groundtruth[ input_data_fields. 
groundtruth_image_classes] = groundtruth_image_classes + label_id_offset if detection_model.groundtruth_has_field(fields.BoxListFields.masks): groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.masks)) if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd): groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.is_crowd)) if detection_model.groundtruth_has_field(input_data_fields.groundtruth_area): groundtruth[input_data_fields.groundtruth_area] = tf.stack( detection_model.groundtruth_lists(input_data_fields.groundtruth_area)) if detection_model.groundtruth_has_field(fields.BoxListFields.keypoints): groundtruth[input_data_fields.groundtruth_keypoints] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.keypoints)) if detection_model.groundtruth_has_field( fields.BoxListFields.keypoint_depths): groundtruth[input_data_fields.groundtruth_keypoint_depths] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.keypoint_depths)) groundtruth[ input_data_fields.groundtruth_keypoint_depth_weights] = tf.stack( detection_model.groundtruth_lists( fields.BoxListFields.keypoint_depth_weights)) if detection_model.groundtruth_has_field( fields.BoxListFields.keypoint_visibilities): groundtruth[input_data_fields.groundtruth_keypoint_visibilities] = tf.stack( detection_model.groundtruth_lists( fields.BoxListFields.keypoint_visibilities)) if detection_model.groundtruth_has_field(fields.BoxListFields.group_of): groundtruth[input_data_fields.groundtruth_group_of] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.group_of)) label_id_offset_paddings = tf.constant([[0, 0], [1, 0]]) if detection_model.groundtruth_has_field( input_data_fields.groundtruth_verified_neg_classes): groundtruth[input_data_fields.groundtruth_verified_neg_classes] = tf.pad( tf.stack( detection_model.groundtruth_lists( input_data_fields.groundtruth_verified_neg_classes)), label_id_offset_paddings) if detection_model.groundtruth_has_field( input_data_fields.groundtruth_not_exhaustive_classes): groundtruth[input_data_fields.groundtruth_not_exhaustive_classes] = tf.pad( tf.stack( detection_model.groundtruth_lists( input_data_fields.groundtruth_not_exhaustive_classes)), label_id_offset_paddings) if detection_model.groundtruth_has_field( fields.BoxListFields.densepose_num_points): groundtruth[input_data_fields.groundtruth_dp_num_points] = tf.stack( detection_model.groundtruth_lists( fields.BoxListFields.densepose_num_points)) if detection_model.groundtruth_has_field( fields.BoxListFields.densepose_part_ids): groundtruth[input_data_fields.groundtruth_dp_part_ids] = tf.stack( detection_model.groundtruth_lists( fields.BoxListFields.densepose_part_ids)) if detection_model.groundtruth_has_field( fields.BoxListFields.densepose_surface_coords): groundtruth[input_data_fields.groundtruth_dp_surface_coords] = tf.stack( detection_model.groundtruth_lists( fields.BoxListFields.densepose_surface_coords)) if detection_model.groundtruth_has_field(fields.BoxListFields.track_ids): groundtruth[input_data_fields.groundtruth_track_ids] = tf.stack( detection_model.groundtruth_lists(fields.BoxListFields.track_ids)) if detection_model.groundtruth_has_field( input_data_fields.groundtruth_labeled_classes): groundtruth[input_data_fields.groundtruth_labeled_classes] = tf.pad( tf.stack( detection_model.groundtruth_lists( input_data_fields.groundtruth_labeled_classes)), 
label_id_offset_paddings) groundtruth[input_data_fields.num_groundtruth_boxes] = ( tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]])) return groundtruth def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True): """Unstacks all tensors in `tensor_dict` along 0th dimension. Unstacks tensor from the tensor dict along 0th dimension and returns a tensor_dict containing values that are lists of unstacked, unpadded tensors. Tensors in the `tensor_dict` are expected to be of one of the three shapes: 1. [batch_size] 2. [batch_size, height, width, channels] 3. [batch_size, num_boxes, d1, d2, ... dn] When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3 above are sliced along the `num_boxes` dimension using the value in tensor field.InputDataFields.num_groundtruth_boxes. Note that this function has a static list of input data fields and has to be kept in sync with the InputDataFields defined in core/standard_fields.py Args: tensor_dict: A dictionary of batched groundtruth tensors. unpad_groundtruth_tensors: Whether to remove padding along `num_boxes` dimension of the groundtruth tensors. Returns: A dictionary where the keys are from fields.InputDataFields and values are a list of unstacked (optionally unpadded) tensors. Raises: ValueError: If unpad_tensors is True and `tensor_dict` does not contain `num_groundtruth_boxes` tensor. """ unbatched_tensor_dict = { key: tf.unstack(tensor) for key, tensor in tensor_dict.items() } if unpad_groundtruth_tensors: if (fields.InputDataFields.num_groundtruth_boxes not in unbatched_tensor_dict): raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. ' 'Keys available: {}'.format( unbatched_tensor_dict.keys())) unbatched_unpadded_tensor_dict = {} unpad_keys = set([ # List of input data fields that are padded along the num_boxes # dimension. This list has to be kept in sync with InputDataFields in # standard_fields.py. 
fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_instance_mask_weights, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_keypoint_depths, fields.InputDataFields.groundtruth_keypoint_depth_weights, fields.InputDataFields.groundtruth_keypoint_visibilities, fields.InputDataFields.groundtruth_dp_num_points, fields.InputDataFields.groundtruth_dp_part_ids, fields.InputDataFields.groundtruth_dp_surface_coords, fields.InputDataFields.groundtruth_track_ids, fields.InputDataFields.groundtruth_group_of, fields.InputDataFields.groundtruth_difficult, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_weights ]).intersection(set(unbatched_tensor_dict.keys())) for key in unpad_keys: unpadded_tensor_list = [] for num_gt, padded_tensor in zip( unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes], unbatched_tensor_dict[key]): tensor_shape = shape_utils.combined_static_and_dynamic_shape( padded_tensor) slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32) slice_size = tf.stack( [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]]) unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size) unpadded_tensor_list.append(unpadded_tensor) unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict) return unbatched_tensor_dict def provide_groundtruth(model, labels, training_step=None): """Provides the labels to a model as groundtruth. This helper function extracts the corresponding boxes, classes, keypoints, weights, masks, etc. from the labels, and provides it as groundtruth to the models. Args: model: The detection model to provide groundtruth to. labels: The labels for the training or evaluation inputs. training_step: int, optional. The training step for the model. Useful for models which want to anneal loss weights. 
""" gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes] gt_classes_list = labels[fields.InputDataFields.groundtruth_classes] gt_masks_list = None if fields.InputDataFields.groundtruth_instance_masks in labels: gt_masks_list = labels[fields.InputDataFields.groundtruth_instance_masks] gt_mask_weights_list = None if fields.InputDataFields.groundtruth_instance_mask_weights in labels: gt_mask_weights_list = labels[ fields.InputDataFields.groundtruth_instance_mask_weights] gt_keypoints_list = None if fields.InputDataFields.groundtruth_keypoints in labels: gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints] gt_keypoint_depths_list = None gt_keypoint_depth_weights_list = None if fields.InputDataFields.groundtruth_keypoint_depths in labels: gt_keypoint_depths_list = ( labels[fields.InputDataFields.groundtruth_keypoint_depths]) gt_keypoint_depth_weights_list = ( labels[fields.InputDataFields.groundtruth_keypoint_depth_weights]) gt_keypoint_visibilities_list = None if fields.InputDataFields.groundtruth_keypoint_visibilities in labels: gt_keypoint_visibilities_list = labels[ fields.InputDataFields.groundtruth_keypoint_visibilities] gt_dp_num_points_list = None if fields.InputDataFields.groundtruth_dp_num_points in labels: gt_dp_num_points_list = labels[ fields.InputDataFields.groundtruth_dp_num_points] gt_dp_part_ids_list = None if fields.InputDataFields.groundtruth_dp_part_ids in labels: gt_dp_part_ids_list = labels[fields.InputDataFields.groundtruth_dp_part_ids] gt_dp_surface_coords_list = None if fields.InputDataFields.groundtruth_dp_surface_coords in labels: gt_dp_surface_coords_list = labels[ fields.InputDataFields.groundtruth_dp_surface_coords] gt_track_ids_list = None if fields.InputDataFields.groundtruth_track_ids in labels: gt_track_ids_list = labels[fields.InputDataFields.groundtruth_track_ids] gt_weights_list = None if fields.InputDataFields.groundtruth_weights in labels: gt_weights_list = labels[fields.InputDataFields.groundtruth_weights] gt_confidences_list = None if fields.InputDataFields.groundtruth_confidences in labels: gt_confidences_list = labels[fields.InputDataFields.groundtruth_confidences] gt_is_crowd_list = None if fields.InputDataFields.groundtruth_is_crowd in labels: gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd] gt_group_of_list = None if fields.InputDataFields.groundtruth_group_of in labels: gt_group_of_list = labels[fields.InputDataFields.groundtruth_group_of] gt_area_list = None if fields.InputDataFields.groundtruth_area in labels: gt_area_list = labels[fields.InputDataFields.groundtruth_area] gt_labeled_classes = None if fields.InputDataFields.groundtruth_labeled_classes in labels: gt_labeled_classes = labels[ fields.InputDataFields.groundtruth_labeled_classes] gt_verified_neg_classes = None if fields.InputDataFields.groundtruth_verified_neg_classes in labels: gt_verified_neg_classes = labels[ fields.InputDataFields.groundtruth_verified_neg_classes] gt_not_exhaustive_classes = None if fields.InputDataFields.groundtruth_not_exhaustive_classes in labels: gt_not_exhaustive_classes = labels[ fields.InputDataFields.groundtruth_not_exhaustive_classes] groundtruth_image_classes = None if fields.InputDataFields.groundtruth_image_classes in labels: groundtruth_image_classes = labels[ fields.InputDataFields.groundtruth_image_classes] model.provide_groundtruth( groundtruth_boxes_list=gt_boxes_list, groundtruth_classes_list=gt_classes_list, groundtruth_confidences_list=gt_confidences_list, 
groundtruth_labeled_classes=gt_labeled_classes, groundtruth_masks_list=gt_masks_list, groundtruth_mask_weights_list=gt_mask_weights_list, groundtruth_keypoints_list=gt_keypoints_list, groundtruth_keypoint_visibilities_list=gt_keypoint_visibilities_list, groundtruth_dp_num_points_list=gt_dp_num_points_list, groundtruth_dp_part_ids_list=gt_dp_part_ids_list, groundtruth_dp_surface_coords_list=gt_dp_surface_coords_list, groundtruth_weights_list=gt_weights_list, groundtruth_is_crowd_list=gt_is_crowd_list, groundtruth_group_of_list=gt_group_of_list, groundtruth_area_list=gt_area_list, groundtruth_track_ids_list=gt_track_ids_list, groundtruth_verified_neg_classes=gt_verified_neg_classes, groundtruth_not_exhaustive_classes=gt_not_exhaustive_classes, groundtruth_keypoint_depths_list=gt_keypoint_depths_list, groundtruth_keypoint_depth_weights_list=gt_keypoint_depth_weights_list, groundtruth_image_classes=groundtruth_image_classes, training_step=training_step) def create_model_fn(detection_model_fn, configs, hparams=None, use_tpu=False, postprocess_on_cpu=False): """Creates a model function for `Estimator`. Args: detection_model_fn: Function that returns a `DetectionModel` instance. configs: Dictionary of pipeline config objects. hparams: `HParams` object. use_tpu: Boolean indicating whether model should be constructed for use on TPU. postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess is scheduled on the host cpu. Returns: `model_fn` for `Estimator`. """ train_config = configs['train_config'] eval_input_config = configs['eval_input_config'] eval_config = configs['eval_config'] def model_fn(features, labels, mode, params=None): """Constructs the object detection model. Args: features: Dictionary of feature tensors, returned from `input_fn`. labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL, otherwise None. mode: Mode key from tf.estimator.ModeKeys. params: Parameter dictionary passed from the estimator. Returns: An `EstimatorSpec` that encapsulates the model and its serving configurations. """ params = params or {} total_loss, train_op, detections, export_outputs = None, None, None, None is_training = mode == tf_estimator.ModeKeys.TRAIN # Make sure to set the Keras learning phase. True during training, # False for inference. tf.keras.backend.set_learning_phase(is_training) # Set policy for mixed-precision training with Keras-based models. if use_tpu and train_config.use_bfloat16: # Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0. tf.keras.layers.enable_v2_dtype_behavior() tf2.keras.mixed_precision.set_global_policy('mixed_bfloat16') detection_model = detection_model_fn( is_training=is_training, add_summaries=(not use_tpu)) scaffold_fn = None if mode == tf_estimator.ModeKeys.TRAIN: labels = unstack_batch( labels, unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors) elif mode == tf_estimator.ModeKeys.EVAL: # For evaling on train data, it is necessary to check whether groundtruth # must be unpadded. 
boxes_shape = ( labels[ fields.InputDataFields.groundtruth_boxes].get_shape().as_list()) unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu labels = unstack_batch( labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors) if mode in (tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL): provide_groundtruth(detection_model, labels) preprocessed_images = features[fields.InputDataFields.image] side_inputs = detection_model.get_side_inputs(features) if use_tpu and train_config.use_bfloat16: with tf.tpu.bfloat16_scope(): prediction_dict = detection_model.predict( preprocessed_images, features[fields.InputDataFields.true_image_shape], **side_inputs) prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict) else: prediction_dict = detection_model.predict( preprocessed_images, features[fields.InputDataFields.true_image_shape], **side_inputs) def postprocess_wrapper(args): return detection_model.postprocess(args[0], args[1]) if mode in (tf_estimator.ModeKeys.EVAL, tf_estimator.ModeKeys.PREDICT): if use_tpu and postprocess_on_cpu: detections = tf.tpu.outside_compilation( postprocess_wrapper, (prediction_dict, features[fields.InputDataFields.true_image_shape])) else: detections = postprocess_wrapper( (prediction_dict, features[fields.InputDataFields.true_image_shape])) if mode == tf_estimator.ModeKeys.TRAIN: load_pretrained = hparams.load_pretrained if hparams else False if train_config.fine_tune_checkpoint and load_pretrained: if not train_config.fine_tune_checkpoint_type: # train_config.from_detection_checkpoint field is deprecated. For # backward compatibility, set train_config.fine_tune_checkpoint_type # based on train_config.from_detection_checkpoint. if train_config.from_detection_checkpoint: train_config.fine_tune_checkpoint_type = 'detection' else: train_config.fine_tune_checkpoint_type = 'classification' asg_map = detection_model.restore_map( fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, load_all_detection_checkpoint_vars=( train_config.load_all_detection_checkpoint_vars)) available_var_map = ( variables_helper.get_variables_available_in_checkpoint( asg_map, train_config.fine_tune_checkpoint, include_global_step=False)) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, available_var_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint, available_var_map) if mode in (tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL): if (mode == tf_estimator.ModeKeys.EVAL and eval_config.use_dummy_loss_in_eval): total_loss = tf.constant(1.0) losses_dict = {'Loss/total_loss': total_loss} else: losses_dict = detection_model.loss( prediction_dict, features[fields.InputDataFields.true_image_shape]) losses = [loss_tensor for loss_tensor in losses_dict.values()] if train_config.add_regularization_loss: regularization_losses = detection_model.regularization_losses() if use_tpu and train_config.use_bfloat16: regularization_losses = ops.bfloat16_to_float32_nested( regularization_losses) if regularization_losses: regularization_loss = tf.add_n( regularization_losses, name='regularization_loss') losses.append(regularization_loss) losses_dict['Loss/regularization_loss'] = regularization_loss total_loss = tf.add_n(losses, name='total_loss') losses_dict['Loss/total_loss'] = total_loss if 'graph_rewriter_config' in configs: graph_rewriter_fn = graph_rewriter_builder.build( configs['graph_rewriter_config'], is_training=is_training) 
graph_rewriter_fn() # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we # can write learning rate summaries on TPU without host calls. global_step = tf.train.get_or_create_global_step() training_optimizer, optimizer_summary_vars = optimizer_builder.build( train_config.optimizer) if mode == tf_estimator.ModeKeys.TRAIN: if use_tpu: training_optimizer = tf.tpu.CrossShardOptimizer(training_optimizer) # Optionally freeze some layers by setting their gradients to be zero. trainable_variables = None include_variables = ( train_config.update_trainable_variables if train_config.update_trainable_variables else None) exclude_variables = ( train_config.freeze_variables if train_config.freeze_variables else None) trainable_variables = slim.filter_variables( tf.trainable_variables(), include_patterns=include_variables, exclude_patterns=exclude_variables) clip_gradients_value = None if train_config.gradient_clipping_by_norm > 0: clip_gradients_value = train_config.gradient_clipping_by_norm if not use_tpu: for var in optimizer_summary_vars: tf.summary.scalar(var.op.name, var) summaries = [] if use_tpu else None if train_config.summarize_gradients: summaries = ['gradients', 'gradient_norm', 'global_gradient_norm'] train_op = slim.optimizers.optimize_loss( loss=total_loss, global_step=global_step, learning_rate=None, clip_gradients=clip_gradients_value, optimizer=training_optimizer, update_ops=detection_model.updates(), variables=trainable_variables, summaries=summaries, name='') # Preventing scope prefix on all variables. if mode == tf_estimator.ModeKeys.PREDICT: exported_output = exporter_lib.add_output_tensor_nodes(detections) export_outputs = { tf.saved_model.signature_constants.PREDICT_METHOD_NAME: tf_estimator.export.PredictOutput(exported_output) } eval_metric_ops = None scaffold = None if mode == tf_estimator.ModeKeys.EVAL: class_agnostic = ( fields.DetectionResultFields.detection_classes not in detections) groundtruth = _prepare_groundtruth_for_eval( detection_model, class_agnostic, eval_input_config.max_number_of_boxes) use_original_images = fields.InputDataFields.original_image in features if use_original_images: eval_images = features[fields.InputDataFields.original_image] true_image_shapes = tf.slice( features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3]) original_image_spatial_shapes = features[ fields.InputDataFields.original_image_spatial_shape] else: eval_images = features[fields.InputDataFields.image] true_image_shapes = None original_image_spatial_shapes = None eval_dict = eval_util.result_dict_for_batched_example( eval_images, features[inputs.HASH_KEY], detections, groundtruth, class_agnostic=class_agnostic, scale_to_absolute=True, original_image_spatial_shapes=original_image_spatial_shapes, true_image_shapes=true_image_shapes) if fields.InputDataFields.image_additional_channels in features: eval_dict[fields.InputDataFields.image_additional_channels] = features[ fields.InputDataFields.image_additional_channels] if class_agnostic: category_index = label_map_util.create_class_agnostic_category_index() else: category_index = label_map_util.create_category_index_from_labelmap( eval_input_config.label_map_path) vis_metric_ops = None if not use_tpu and use_original_images: keypoint_edges = [(kp.start, kp.end) for kp in eval_config.keypoint_edge ] eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections( category_index, max_examples_to_draw=eval_config.num_visualizations, max_boxes_to_draw=eval_config.max_num_boxes_to_visualize, 
min_score_thresh=eval_config.min_score_threshold, use_normalized_coordinates=False, keypoint_edges=keypoint_edges or None) vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops( eval_dict) # Eval metrics on a single example. eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, list(category_index.values()), eval_dict) for loss_key, loss_tensor in iter(losses_dict.items()): eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor) for var in optimizer_summary_vars: eval_metric_ops[var.op.name] = (var, tf.no_op()) if vis_metric_ops is not None: eval_metric_ops.update(vis_metric_ops) eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()} if eval_config.use_moving_averages: variable_averages = tf.train.ExponentialMovingAverage(0.0) variables_to_restore = variable_averages.variables_to_restore() keep_checkpoint_every_n_hours = ( train_config.keep_checkpoint_every_n_hours) saver = tf.train.Saver( variables_to_restore, keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) scaffold = tf.train.Scaffold(saver=saver) # EVAL executes on CPU, so use regular non-TPU EstimatorSpec. if use_tpu and mode != tf_estimator.ModeKeys.EVAL: return tf_estimator.tpu.TPUEstimatorSpec( mode=mode, scaffold_fn=scaffold_fn, predictions=detections, loss=total_loss, train_op=train_op, eval_metrics=eval_metric_ops, export_outputs=export_outputs) else: if scaffold is None: keep_checkpoint_every_n_hours = ( train_config.keep_checkpoint_every_n_hours) saver = tf.train.Saver( sharded=True, keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) scaffold = tf.train.Scaffold(saver=saver) return tf_estimator.EstimatorSpec( mode=mode, predictions=detections, loss=total_loss, train_op=train_op, eval_metric_ops=eval_metric_ops, export_outputs=export_outputs, scaffold=scaffold) return model_fn def create_estimator_and_inputs(run_config, hparams=None, pipeline_config_path=None, config_override=None, train_steps=None, sample_1_of_n_eval_examples=1, sample_1_of_n_eval_on_train_examples=1, model_fn_creator=create_model_fn, use_tpu_estimator=False, use_tpu=False, num_shards=1, params=None, override_eval_num_epochs=True, save_final_config=False, postprocess_on_cpu=False, export_to_tpu=None, **kwargs): """Creates `Estimator`, input functions, and steps. Args: run_config: A `RunConfig`. hparams: (optional) A `HParams`. pipeline_config_path: A path to a pipeline config file. config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to override the config from `pipeline_config_path`. train_steps: Number of training steps. If None, the number of training steps is set from the `TrainConfig` proto. sample_1_of_n_eval_examples: Integer representing how often an eval example should be sampled. If 1, will sample all examples. sample_1_of_n_eval_on_train_examples: Similar to `sample_1_of_n_eval_examples`, except controls the sampling of training data for evaluation. model_fn_creator: A function that creates a `model_fn` for `Estimator`. Follows the signature: * Args: * `detection_model_fn`: Function that returns `DetectionModel` instance. * `configs`: Dictionary of pipeline config objects. * `hparams`: `HParams` object. * Returns: `model_fn` for `Estimator`. use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False, an `Estimator` will be returned. use_tpu: Boolean, whether training and evaluation should run on TPU. Only used if `use_tpu_estimator` is True. 
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator` is True. params: Parameter dictionary passed from the estimator. Only used if `use_tpu_estimator` is True. override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for eval_input. save_final_config: Whether to save final config (obtained after applying overrides) to `estimator.model_dir`. postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true, postprocess is scheduled on the host cpu. export_to_tpu: When use_tpu and export_to_tpu are true, `export_savedmodel()` exports a metagraph for serving on TPU besides the one on CPU. **kwargs: Additional keyword arguments for configuration override. Returns: A dictionary with the following fields: 'estimator': An `Estimator` or `TPUEstimator`. 'train_input_fn': A training input function. 'eval_input_fns': A list of all evaluation input functions. 'eval_input_names': A list of names for each evaluation input. 'eval_on_train_input_fn': An evaluation-on-train input function. 'predict_input_fn': A prediction input function. 'train_steps': Number of training steps. Either directly from input or from configuration. """ get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[ 'get_configs_from_pipeline_file'] merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[ 'merge_external_params_with_configs'] create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[ 'create_pipeline_proto_from_configs'] create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn'] create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn'] create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn'] detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base'] configs = get_configs_from_pipeline_file( pipeline_config_path, config_override=config_override) kwargs.update({ 'train_steps': train_steps, 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu }) if sample_1_of_n_eval_examples >= 1: kwargs.update({'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples}) if override_eval_num_epochs: kwargs.update({'eval_num_epochs': 1}) tf.logging.warning( 'Forced number of epochs for all eval validations to be 1.') configs = merge_external_params_with_configs( configs, hparams, kwargs_dict=kwargs) model_config = configs['model'] train_config = configs['train_config'] train_input_config = configs['train_input_config'] eval_config = configs['eval_config'] eval_input_configs = configs['eval_input_configs'] eval_on_train_input_config = copy.deepcopy(train_input_config) eval_on_train_input_config.sample_1_of_n_examples = ( sample_1_of_n_eval_on_train_examples) if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1: tf.logging.warning('Expected number of evaluation epochs is 1, but ' 'instead encountered `eval_on_train_input_config' '.num_epochs` = ' '{}. Overwriting `num_epochs` to 1.'.format( eval_on_train_input_config.num_epochs)) eval_on_train_input_config.num_epochs = 1 # update train_steps from config but only when non-zero value is provided if train_steps is None and train_config.num_steps != 0: train_steps = train_config.num_steps detection_model_fn = functools.partial( detection_model_fn_base, model_config=model_config) # Create the input functions for TRAIN/EVAL/PREDICT. 
train_input_fn = create_train_input_fn( train_config=train_config, train_input_config=train_input_config, model_config=model_config) eval_input_fns = [] for eval_input_config in eval_input_configs: eval_input_fns.append( create_eval_input_fn( eval_config=eval_config, eval_input_config=eval_input_config, model_config=model_config)) eval_input_names = [ eval_input_config.name for eval_input_config in eval_input_configs ] eval_on_train_input_fn = create_eval_input_fn( eval_config=eval_config, eval_input_config=eval_on_train_input_config, model_config=model_config) predict_input_fn = create_predict_input_fn( model_config=model_config, predict_input_config=eval_input_configs[0]) # Read export_to_tpu from hparams if not passed. if export_to_tpu is None and hparams is not None: export_to_tpu = hparams.get('export_to_tpu', False) tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s', use_tpu, export_to_tpu) model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu, postprocess_on_cpu) if use_tpu_estimator: estimator = tf_estimator.tpu.TPUEstimator( model_fn=model_fn, train_batch_size=train_config.batch_size, # For each core, only batch size 1 is supported for eval. eval_batch_size=num_shards * 1 if use_tpu else 1, use_tpu=use_tpu, config=run_config, export_to_tpu=export_to_tpu, eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU params=params if params else {}) else: estimator = tf_estimator.Estimator(model_fn=model_fn, config=run_config) # Write the as-run pipeline config to disk. if run_config.is_chief and save_final_config: pipeline_config_final = create_pipeline_proto_from_configs(configs) config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir) return dict( estimator=estimator, train_input_fn=train_input_fn, eval_input_fns=eval_input_fns, eval_input_names=eval_input_names, eval_on_train_input_fn=eval_on_train_input_fn, predict_input_fn=predict_input_fn, train_steps=train_steps) def create_train_and_eval_specs(train_input_fn, eval_input_fns, eval_on_train_input_fn, predict_input_fn, train_steps, eval_on_train_data=False, final_exporter_name='Servo', eval_spec_names=None): """Creates a `TrainSpec` and `EvalSpec`s. Args: train_input_fn: Function that produces features and labels on train data. eval_input_fns: A list of functions that produce features and labels on eval data. eval_on_train_input_fn: Function that produces features and labels for evaluation on train data. predict_input_fn: Function that produces features for inference. train_steps: Number of training steps. eval_on_train_data: Whether to evaluate model on training data. Default is False. final_exporter_name: String name given to `FinalExporter`. eval_spec_names: A list of string names for each `EvalSpec`. Returns: Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is True, the last `EvalSpec` in the list will correspond to training data. The rest EvalSpecs in the list are evaluation datas. """ train_spec = tf_estimator.TrainSpec( input_fn=train_input_fn, max_steps=train_steps) if eval_spec_names is None: eval_spec_names = [str(i) for i in range(len(eval_input_fns))] eval_specs = [] for index, (eval_spec_name, eval_input_fn) in enumerate(zip(eval_spec_names, eval_input_fns)): # Uses final_exporter_name as exporter_name for the first eval spec for # backward compatibility. 
if index == 0: exporter_name = final_exporter_name else: exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name) exporter = tf_estimator.FinalExporter( name=exporter_name, serving_input_receiver_fn=predict_input_fn) eval_specs.append( tf_estimator.EvalSpec( name=eval_spec_name, input_fn=eval_input_fn, steps=None, exporters=exporter)) if eval_on_train_data: eval_specs.append( tf_estimator.EvalSpec( name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None)) return train_spec, eval_specs def _evaluate_checkpoint(estimator, input_fn, checkpoint_path, name, max_retries=0): """Evaluates a checkpoint. Args: estimator: Estimator object to use for evaluation. input_fn: Input function to use for evaluation. checkpoint_path: Path of the checkpoint to evaluate. name: Namescope for eval summary. max_retries: Maximum number of times to retry the evaluation on encountering a tf.errors.InvalidArgumentError. If negative, will always retry the evaluation. Returns: Estimator evaluation results. """ always_retry = True if max_retries < 0 else False retries = 0 while always_retry or retries <= max_retries: try: return estimator.evaluate( input_fn=input_fn, steps=None, checkpoint_path=checkpoint_path, name=name) except tf.errors.InvalidArgumentError as e: if always_retry or retries < max_retries: tf.logging.info('Retrying checkpoint evaluation after exception: %s', e) retries += 1 else: raise e def continuous_eval_generator(estimator, model_dir, input_fn, train_steps, name, max_retries=0): """Perform continuous evaluation on checkpoints written to a model directory. Args: estimator: Estimator object to use for evaluation. model_dir: Model directory to read checkpoints for continuous evaluation. input_fn: Input function to use for evaluation. train_steps: Number of training steps. This is used to infer the last checkpoint and stop evaluation loop. name: Namescope for eval summary. max_retries: Maximum number of times to retry the evaluation on encountering a tf.errors.InvalidArgumentError. If negative, will always retry the evaluation. Yields: Pair of current step and eval_results. """ def terminate_eval(): tf.logging.info('Terminating eval after 180 seconds of no checkpoints') return True for ckpt in tf.train.checkpoints_iterator( model_dir, min_interval_secs=180, timeout=None, timeout_fn=terminate_eval): tf.logging.info('Starting Evaluation.') try: eval_results = _evaluate_checkpoint( estimator=estimator, input_fn=input_fn, checkpoint_path=ckpt, name=name, max_retries=max_retries) tf.logging.info('Eval results: %s' % eval_results) # Terminate eval job when final checkpoint is reached current_step = int(os.path.basename(ckpt).split('-')[1]) yield (current_step, eval_results) if current_step >= train_steps: tf.logging.info( 'Evaluation finished after training step %d' % current_step) break except tf.errors.NotFoundError: tf.logging.info( 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt) def continuous_eval(estimator, model_dir, input_fn, train_steps, name, max_retries=0): """Performs continuous evaluation on checkpoints written to a model directory. Args: estimator: Estimator object to use for evaluation. model_dir: Model directory to read checkpoints for continuous evaluation. input_fn: Input function to use for evaluation. train_steps: Number of training steps. This is used to infer the last checkpoint and stop evaluation loop. name: Namescope for eval summary. max_retries: Maximum number of times to retry the evaluation on encountering a tf.errors.InvalidArgumentError. 
If negative, will always retry the evaluation. """ for current_step, eval_results in continuous_eval_generator( estimator, model_dir, input_fn, train_steps, name, max_retries): tf.logging.info('Step %s, Eval results: %s', current_step, eval_results) def populate_experiment(run_config, hparams, pipeline_config_path, train_steps=None, eval_steps=None, model_fn_creator=create_model_fn, **kwargs): """Populates an `Experiment` object. EXPERIMENT CLASS IS DEPRECATED. Please switch to tf.estimator.train_and_evaluate. As an example, see model_main.py. Args: run_config: A `RunConfig`. hparams: A `HParams`. pipeline_config_path: A path to a pipeline config file. train_steps: Number of training steps. If None, the number of training steps is set from the `TrainConfig` proto. eval_steps: Number of evaluation steps per evaluation cycle. If None, the number of evaluation steps is set from the `EvalConfig` proto. model_fn_creator: A function that creates a `model_fn` for `Estimator`. Follows the signature: * Args: * `detection_model_fn`: Function that returns `DetectionModel` instance. * `configs`: Dictionary of pipeline config objects. * `hparams`: `HParams` object. * Returns: `model_fn` for `Estimator`. **kwargs: Additional keyword arguments for configuration override. Returns: An `Experiment` that defines all aspects of training, evaluation, and export. """ tf.logging.warning('Experiment is being deprecated. Please use ' 'tf.estimator.train_and_evaluate(). See model_main.py for ' 'an example.') train_and_eval_dict = create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps, eval_steps=eval_steps, model_fn_creator=model_fn_creator, save_final_config=True, **kwargs) estimator = train_and_eval_dict['estimator'] train_input_fn = train_and_eval_dict['train_input_fn'] eval_input_fns = train_and_eval_dict['eval_input_fns'] predict_input_fn = train_and_eval_dict['predict_input_fn'] train_steps = train_and_eval_dict['train_steps'] export_strategies = [ contrib_learn.utils.saved_model_export_utils.make_export_strategy( serving_input_fn=predict_input_fn) ] return contrib_learn.Experiment( estimator=estimator, train_input_fn=train_input_fn, eval_input_fn=eval_input_fns[0], train_steps=train_steps, eval_steps=None, export_strategies=export_strategies, eval_delay_secs=120, )
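A minimal usage sketch (not part of the original module) of how the helpers above fit together, following the train_and_evaluate pattern that the populate_experiment docstring points to in model_main.py. It reuses the module's existing `tf_estimator` alias and the `create_estimator_and_inputs` / `create_train_and_eval_specs` functions defined above; the pipeline config path and model directory are hypothetical placeholders.

# Hedged sketch; '/tmp/pipeline.config' and '/tmp/od_model_dir' are placeholders.
run_config = tf_estimator.RunConfig(model_dir='/tmp/od_model_dir')
train_and_eval_dict = create_estimator_and_inputs(
    run_config=run_config,
    pipeline_config_path='/tmp/pipeline.config',
    sample_1_of_n_eval_examples=1,
    save_final_config=True)
estimator = train_and_eval_dict['estimator']
train_spec, eval_specs = create_train_and_eval_specs(
    train_and_eval_dict['train_input_fn'],
    train_and_eval_dict['eval_input_fns'],
    train_and_eval_dict['eval_on_train_input_fn'],
    train_and_eval_dict['predict_input_fn'],
    train_and_eval_dict['train_steps'],
    eval_on_train_data=False)
# Train and evaluate against the first EvalSpec (named '0' by default).
tf_estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])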
50837
42.712812
80
py
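The record above also defines continuous_eval for running a standalone evaluation job over checkpoints written by a separate trainer. A hedged sketch of driving it, assuming the estimator and input functions from the previous sketch and a hypothetical model directory shared with the training job:

# Hedged sketch; '/tmp/od_model_dir' is a placeholder and must match the trainer's model_dir.
continuous_eval(
    estimator=estimator,
    model_dir='/tmp/od_model_dir',
    input_fn=train_and_eval_dict['eval_input_fns'][0],
    train_steps=train_and_eval_dict['train_steps'],
    name='validation_data')  # `name` only labels the eval summaries.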
models
models-master/research/object_detection/exporter_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.export_inference_graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import numpy as np import six import tensorflow.compat.v1 as tf from google.protobuf import text_format from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.tools import strip_unused_lib from object_detection import exporter from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.core import model from object_detection.protos import graph_rewriter_pb2 from object_detection.protos import pipeline_pb2 from object_detection.utils import ops from object_detection.utils import tf_version from object_detection.utils import variables_helper if six.PY2: import mock # pylint: disable=g-import-not-at-top else: mock = unittest.mock # pylint: disable=g-import-not-at-top, g-importing-member # pylint: disable=g-import-not-at-top try: import tf_slim as slim except ImportError: # TF 2.0 doesn't ship with contrib. pass # pylint: enable=g-import-not-at-top class FakeModel(model.DetectionModel): def __init__(self, add_detection_keypoints=False, add_detection_masks=False, add_detection_features=False): self._add_detection_keypoints = add_detection_keypoints self._add_detection_masks = add_detection_masks self._add_detection_features = add_detection_features def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. 
return tf.identity(inputs), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} def postprocess(self, prediction_dict, true_image_shapes): with tf.control_dependencies(list(prediction_dict.values())): postprocessed_tensors = { 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]], tf.float32), 'detection_scores': tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32), 'detection_multiclass_scores': tf.constant([[[0.3, 0.7], [0.4, 0.6]], [[0.1, 0.9], [0.0, 0.0]]], tf.float32), 'detection_classes': tf.constant([[0, 1], [1, 0]], tf.float32), 'num_detections': tf.constant([2, 1], tf.float32), 'raw_detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.0, 0.5]]], tf.float32), 'raw_detection_scores': tf.constant([[0.7, 0.6], [0.9, 0.5]], tf.float32), } if self._add_detection_keypoints: postprocessed_tensors['detection_keypoints'] = tf.constant( np.arange(48).reshape([2, 2, 6, 2]), tf.float32) if self._add_detection_masks: postprocessed_tensors['detection_masks'] = tf.constant( np.arange(64).reshape([2, 2, 4, 4]), tf.float32) if self._add_detection_features: # let fake detection features have shape [4, 4, 10] postprocessed_tensors['detection_features'] = tf.constant( np.ones((2, 2, 4, 4, 10)), tf.float32) return postprocessed_tensors def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): pass def restore_from_objects(self, fine_tune_checkpoint_type): pass def loss(self, prediction_dict, true_image_shapes): pass def regularization_losses(self): pass def updates(self): pass @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ExportInferenceGraphTest(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_path, use_moving_averages, enable_quantization=False): g = tf.Graph() with g.as_default(): mock_model = FakeModel() preprocessed_inputs, true_image_shapes = mock_model.preprocess( tf.placeholder(tf.float32, shape=[None, None, None, 3])) predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) mock_model.postprocess(predictions, true_image_shapes) if use_moving_averages: tf.train.ExponentialMovingAverage(0.0).apply() tf.train.get_or_create_global_step() if enable_quantization: graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() saver = tf.train.Saver() init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) saver.save(sess, checkpoint_path) def _load_inference_graph(self, inference_graph_path, is_binary=True): od_graph = tf.Graph() with od_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(inference_graph_path, mode='rb') as fid: if is_binary: od_graph_def.ParseFromString(fid.read()) else: text_format.Parse(fid.read(), od_graph_def) tf.import_graph_def(od_graph_def, name='') return od_graph def _create_tf_example(self, image_array): with self.test_session(): encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval() def _bytes_feature(value): return tf.train.Feature( bytes_list=tf.train.BytesList(value=[six.ensure_binary(value)])) example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_feature(encoded_image), 'image/format': _bytes_feature('jpg'), 'image/source_id': 
_bytes_feature('image_id') })).SerializeToString() return example def test_export_graph_with_image_tensor_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def test_write_inference_graph(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, write_inference_graph=True) self.assertTrue(os.path.exists(os.path.join( output_directory, 'inference_graph.pbtxt'))) def test_export_graph_with_fixed_size_image_tensor_input(self): input_shape = [1, 320, 320, 3] tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, input_shape=input_shape) saved_model_path = os.path.join(output_directory, 'saved_model') self.assertTrue( os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name image_tensor = od_graph.get_tensor_by_name(input_tensor_name) self.assertSequenceEqual(image_tensor.get_shape().as_list(), input_shape) def test_export_graph_with_tf_example_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False 
exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def test_export_graph_with_fixed_size_tf_example_input(self): input_shape = [1, 320, 320, 3] tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, input_shape=input_shape) saved_model_path = os.path.join(output_directory, 'saved_model') self.assertTrue( os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) def test_export_graph_with_encoded_image_string_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def test_export_graph_with_fixed_size_encoded_image_string_input(self): input_shape = [1, 320, 320, 3] tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, input_shape=input_shape) saved_model_path = os.path.join(output_directory, 'saved_model') self.assertTrue( os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) def _get_variables_in_checkpoint(self, checkpoint_file): return set([ var_name for var_name, _ in tf.train.list_variables(checkpoint_file)]) def test_replace_variable_values_with_moving_averages(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) graph = tf.Graph() with graph.as_default(): fake_model = FakeModel() preprocessed_inputs, true_image_shapes = fake_model.preprocess( 
tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])) predictions = fake_model.predict(preprocessed_inputs, true_image_shapes) fake_model.postprocess(predictions, true_image_shapes) exporter.replace_variable_values_with_moving_averages( graph, trained_checkpoint_prefix, new_checkpoint_prefix) expected_variables = set(['conv2d/bias', 'conv2d/kernel']) variables_in_old_ckpt = self._get_variables_in_checkpoint( trained_checkpoint_prefix) self.assertIn('conv2d/bias/ExponentialMovingAverage', variables_in_old_ckpt) self.assertIn('conv2d/kernel/ExponentialMovingAverage', variables_in_old_ckpt) variables_in_new_ckpt = self._get_variables_in_checkpoint( new_checkpoint_prefix) self.assertTrue(expected_variables.issubset(variables_in_new_ckpt)) self.assertNotIn('conv2d/bias/ExponentialMovingAverage', variables_in_new_ckpt) self.assertNotIn('conv2d/kernel/ExponentialMovingAverage', variables_in_new_ckpt) def test_export_graph_with_moving_averages(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = True exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step']) actual_variables = set( [var_name for var_name, _ in tf.train.list_variables(output_directory)]) self.assertTrue(expected_variables.issubset(actual_variables)) def test_export_model_with_quantization_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False, enable_quantization=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'inference_graph.pbtxt') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() text_format.Merge( """graph_rewriter { quantization { delay: 50000 activation_bits: 8 weight_bits: 8 } }""", pipeline_config) exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, write_inference_graph=True) self._load_inference_graph(inference_graph_path, is_binary=False) has_quant_nodes = False for v in variables_helper.get_global_variables_safely(): if six.ensure_str(v.op.name).endswith('act_quant/min'): has_quant_nodes = True break self.assertTrue(has_quant_nodes) def test_export_model_with_all_output_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) 
as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True, add_detection_features=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph): inference_graph.get_tensor_by_name('image_tensor:0') inference_graph.get_tensor_by_name('detection_boxes:0') inference_graph.get_tensor_by_name('detection_scores:0') inference_graph.get_tensor_by_name('detection_multiclass_scores:0') inference_graph.get_tensor_by_name('detection_classes:0') inference_graph.get_tensor_by_name('detection_keypoints:0') inference_graph.get_tensor_by_name('detection_masks:0') inference_graph.get_tensor_by_name('num_detections:0') inference_graph.get_tensor_by_name('detection_features:0') def test_export_model_with_detection_only_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel(add_detection_masks=False) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph): inference_graph.get_tensor_by_name('image_tensor:0') inference_graph.get_tensor_by_name('detection_boxes:0') inference_graph.get_tensor_by_name('detection_scores:0') inference_graph.get_tensor_by_name('detection_multiclass_scores:0') inference_graph.get_tensor_by_name('detection_classes:0') inference_graph.get_tensor_by_name('num_detections:0') with self.assertRaises(KeyError): inference_graph.get_tensor_by_name('detection_keypoints:0') inference_graph.get_tensor_by_name('detection_masks:0') def test_export_model_with_detection_only_nodes_and_detection_features(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel(add_detection_features=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph): inference_graph.get_tensor_by_name('image_tensor:0') inference_graph.get_tensor_by_name('detection_boxes:0') inference_graph.get_tensor_by_name('detection_scores:0') inference_graph.get_tensor_by_name('detection_multiclass_scores:0') 
inference_graph.get_tensor_by_name('detection_classes:0') inference_graph.get_tensor_by_name('num_detections:0') inference_graph.get_tensor_by_name('detection_features:0') with self.assertRaises(KeyError): inference_graph.get_tensor_by_name('detection_keypoints:0') inference_graph.get_tensor_by_name('detection_masks:0') def test_export_and_run_inference_with_image_tensor(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph) as sess: image_tensor = inference_graph.get_tensor_by_name('image_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def _create_encoded_image_string(self, image_array_np, encoding_format): od_graph = tf.Graph() with od_graph.as_default(): if encoding_format == 'jpg': encoded_string = tf.image.encode_jpeg(image_array_np) elif encoding_format == 'png': encoded_string = tf.image.encode_png(image_array_np) else: raise ValueError('Supports only the following formats: `jpg`, `png`') with self.test_session(graph=od_graph): return encoded_string.eval() def test_export_and_run_inference_with_encoded_image_string_tensor(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( 
input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) jpg_image_str = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'jpg') png_image_str = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'png') with self.test_session(graph=inference_graph) as sess: image_str_tensor = inference_graph.get_tensor_by_name( 'encoded_image_string_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') multiclass_scores = inference_graph.get_tensor_by_name( 'detection_multiclass_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') for image_str in [jpg_image_str, png_image_str]: image_str_batch_np = np.hstack([image_str]* 2) (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [ boxes, scores, multiclass_scores, classes, keypoints, masks, num_detections ], feed_dict={image_str_tensor: image_str_batch_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], [[0.1, 0.9], [0.0, 0.0]]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_raise_runtime_error_on_images_with_different_sizes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) large_image = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'jpg') small_image = self._create_encoded_image_string( np.ones((2, 2, 3)).astype(np.uint8), 'jpg') image_str_batch_np = np.hstack([large_image, small_image]) with self.test_session(graph=inference_graph) as sess: image_str_tensor = inference_graph.get_tensor_by_name( 'encoded_image_string_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = 
inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'TensorArray.*shape'): sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={image_str_tensor: image_str_batch_np}) def test_export_and_run_inference_with_tf_example(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) tf_example_np = np.expand_dims(self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8)), axis=0) with self.test_session(graph=inference_graph) as sess: tf_example = inference_graph.get_tensor_by_name('tf_example:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_write_frozen_graph(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) outputs, _ = exporter.build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) output_node_names = ','.join(list(outputs.keys())) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() 
exporter.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=trained_checkpoint_prefix, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph=inference_graph_path, clear_devices=True, initializer_nodes='') inference_graph = self._load_inference_graph(inference_graph_path) tf_example_np = np.expand_dims(self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8)), axis=0) with self.test_session(graph=inference_graph) as sess: tf_example = inference_graph.get_tensor_by_name('tf_example:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_export_graph_saves_pipeline_file(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) expected_pipeline_path = os.path.join( output_directory, 'pipeline.config') self.assertTrue(os.path.exists(expected_pipeline_path)) written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.gfile.GFile(expected_pipeline_path, 'r') as f: proto_str = f.read() text_format.Merge(proto_str, written_pipeline_config) self.assertProtoEquals(pipeline_config, written_pipeline_config) def test_export_saved_model_and_run_inference(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) tf_example_np = 
np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name tf_example = od_graph.get_tensor_by_name(input_tensor_name) boxes = od_graph.get_tensor_by_name( signature.outputs['detection_boxes'].name) scores = od_graph.get_tensor_by_name( signature.outputs['detection_scores'].name) multiclass_scores = od_graph.get_tensor_by_name( signature.outputs['detection_multiclass_scores'].name) classes = od_graph.get_tensor_by_name( signature.outputs['detection_classes'].name) keypoints = od_graph.get_tensor_by_name( signature.outputs['detection_keypoints'].name) masks = od_graph.get_tensor_by_name( signature.outputs['detection_masks'].name) num_detections = od_graph.get_tensor_by_name( signature.outputs['num_detections'].name) (boxes_np, scores_np, multiclass_scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, multiclass_scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(multiclass_scores_np, [[[0.3, 0.7], [0.4, 0.6]], [[0.1, 0.9], [0.0, 0.0]]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_write_saved_model(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) outputs, placeholder_tensor = exporter.build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) output_node_names = ','.join(list(outputs.keys())) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() frozen_graph_def = exporter.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=trained_checkpoint_prefix, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph='', clear_devices=True, initializer_nodes='') exporter.write_saved_model( saved_model_path=saved_model_path, frozen_graph_def=frozen_graph_def, inputs=placeholder_tensor, outputs=outputs) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = 
tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name tf_example = od_graph.get_tensor_by_name(input_tensor_name) boxes = od_graph.get_tensor_by_name( signature.outputs['detection_boxes'].name) scores = od_graph.get_tensor_by_name( signature.outputs['detection_scores'].name) classes = od_graph.get_tensor_by_name( signature.outputs['detection_classes'].name) keypoints = od_graph.get_tensor_by_name( signature.outputs['detection_keypoints'].name) masks = od_graph.get_tensor_by_name( signature.outputs['detection_masks'].name) num_detections = od_graph.get_tensor_by_name( signature.outputs['num_detections'].name) (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_export_checkpoint_and_run_inference(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') model_path = os.path.join(output_directory, 'model.ckpt') meta_graph_path = model_path + '.meta' with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: new_saver = tf.train.import_meta_graph(meta_graph_path) new_saver.restore(sess, model_path) tf_example = od_graph.get_tensor_by_name('tf_example:0') boxes = od_graph.get_tensor_by_name('detection_boxes:0') scores = od_graph.get_tensor_by_name('detection_scores:0') classes = od_graph.get_tensor_by_name('detection_classes:0') keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') masks = od_graph.get_tensor_by_name('detection_masks:0') num_detections = od_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def 
test_write_graph_and_checkpoint(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') model_path = os.path.join(output_directory, 'model.ckpt') meta_graph_path = model_path + '.meta' tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) exporter.build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() exporter.write_graph_and_checkpoint( inference_graph_def=tf.get_default_graph().as_graph_def(), model_path=model_path, input_saver_def=input_saver_def, trained_checkpoint_prefix=trained_checkpoint_prefix) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: new_saver = tf.train.import_meta_graph(meta_graph_path) new_saver.restore(sess, model_path) tf_example = od_graph.get_tensor_by_name('tf_example:0') boxes = od_graph.get_tensor_by_name('detection_boxes:0') scores = od_graph.get_tensor_by_name('detection_scores:0') raw_boxes = od_graph.get_tensor_by_name('raw_detection_boxes:0') raw_scores = od_graph.get_tensor_by_name('raw_detection_scores:0') classes = od_graph.get_tensor_by_name('detection_classes:0') keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') masks = od_graph.get_tensor_by_name('detection_masks:0') num_detections = od_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, raw_boxes_np, raw_scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, raw_boxes, raw_scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(raw_boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.5, 0.0, 0.5]]]) self.assertAllClose(raw_scores_np, [[0.7, 0.6], [0.9, 0.5]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_rewrite_nn_resize_op(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) s = ops.nearest_neighbor_upsampling(x, 2) t = s + y exporter.rewrite_nn_resize_op() resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0], x) self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) def test_rewrite_nn_resize_op_quantized(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 
10, 8)) x_conv = slim.conv2d(x, 8, 1) y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) s = ops.nearest_neighbor_upsampling(x_conv, 2) t = s + y graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() exporter.rewrite_nn_resize_op(is_quantized=True) resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) def test_rewrite_nn_resize_op_odd_size(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) s = ops.nearest_neighbor_upsampling(x, 2) t = s[:, :19, :19, :] exporter.rewrite_nn_resize_op() resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0], x) self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) def test_rewrite_nn_resize_op_quantized_odd_size(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) x_conv = slim.conv2d(x, 8, 1) s = ops.nearest_neighbor_upsampling(x_conv, 2) t = s[:, :19, :19, :] graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() exporter.rewrite_nn_resize_op(is_quantized=True) resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) def test_rewrite_nn_resize_op_multiple_path(self): g = tf.Graph() with g.as_default(): with tf.name_scope('nearest_upsampling'): x_1 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) x_1_stack_1 = tf.stack([x_1] * 2, axis=3) x_1_reshape_1 = tf.reshape(x_1_stack_1, [8, 10, 20, 8]) x_1_stack_2 = tf.stack([x_1_reshape_1] * 2, axis=2) x_1_reshape_2 = tf.reshape(x_1_stack_2, [8, 20, 20, 8]) with tf.name_scope('nearest_upsampling'): x_2 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) x_2_stack_1 = tf.stack([x_2] * 2, axis=3) x_2_reshape_1 = tf.reshape(x_2_stack_1, [8, 10, 20, 8]) x_2_stack_2 = tf.stack([x_2_reshape_1] * 2, axis=2) x_2_reshape_2 = tf.reshape(x_2_stack_2, [8, 20, 20, 8]) t = x_1_reshape_2 + x_2_reshape_2 exporter.rewrite_nn_resize_op() graph_def = g.as_graph_def() graph_def = strip_unused_lib.strip_unused( graph_def, input_node_names=[ 'nearest_upsampling/Placeholder', 'nearest_upsampling_1/Placeholder' ], output_node_names=['add'], placeholder_type_enum=dtypes.float32.as_datatype_enum) counter_resize_op = 0 t_input_ops = [op.name for op in t.op.inputs] for node in graph_def.node: # Make sure Stacks are replaced. self.assertNotEqual(node.op, 'Pack') if node.op == 'ResizeNearestNeighbor': counter_resize_op += 1 self.assertIn(six.ensure_str(node.name) + ':0', t_input_ops) self.assertEqual(counter_resize_op, 2) if __name__ == '__main__': tf.test.main()
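The SavedModel round-trip exercised in test_export_saved_model_and_run_inference above also documents how an exported detector is consumed outside of tests. A minimal, self-contained sketch of that loading pattern follows; the function name is hypothetical, and the caller supplies the saved_model directory plus a 1-D array of serialized tf.Example strings, matching the 'tf_example' input type used in the tests.

import tensorflow.compat.v1 as tf


def run_saved_model_inference(saved_model_dir, serialized_tf_examples):
  """Loads an exported SavedModel and runs one batch of detection inference."""
  with tf.Graph().as_default() as od_graph:
    with tf.Session(graph=od_graph) as sess:
      # Load the graph exported under the 'serve' tag and look up the
      # serving_default signature to resolve input/output tensor names.
      meta_graph = tf.saved_model.loader.load(
          sess, [tf.saved_model.tag_constants.SERVING], saved_model_dir)
      signature = meta_graph.signature_def['serving_default']
      inputs = od_graph.get_tensor_by_name(signature.inputs['inputs'].name)
      boxes = od_graph.get_tensor_by_name(
          signature.outputs['detection_boxes'].name)
      scores = od_graph.get_tensor_by_name(
          signature.outputs['detection_scores'].name)
      return sess.run([boxes, scores],
                      feed_dict={inputs: serialized_tf_examples})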
58,708
47.519835
81
py
models
models-master/research/object_detection/export_tflite_ssd_graph_lib_tf1_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.export_tflite_ssd_graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import numpy as np import six import tensorflow.compat.v1 as tf import tf_slim as slim from tensorflow.core.framework import types_pb2 from object_detection import export_tflite_ssd_graph_lib from object_detection import exporter from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.core import model from object_detection.protos import graph_rewriter_pb2 from object_detection.protos import pipeline_pb2 from object_detection.protos import post_processing_pb2 from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if six.PY2: import mock else: from unittest import mock # pylint: disable=g-importing-member # pylint: enable=g-import-not-at-top class FakeModel(model.DetectionModel): def __init__(self, add_detection_masks=False): self._add_detection_masks = add_detection_masks def preprocess(self, inputs): pass def predict(self, preprocessed_inputs, true_image_shapes): features = slim.conv2d(preprocessed_inputs, 3, 1) with tf.control_dependencies([features]): prediction_tensors = { 'box_encodings': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], tf.float32), 'class_predictions_with_background': tf.constant([[[0.7, 0.6], [0.9, 0.0]]], tf.float32), } with tf.control_dependencies( [tf.convert_to_tensor(features.get_shape().as_list()[1:3])]): prediction_tensors['anchors'] = tf.constant( [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32) return prediction_tensors def postprocess(self, prediction_tensors, true_image_shapes): pass def restore_map(self, checkpoint_path, from_detection_checkpoint): pass def restore_from_objects(self, fine_tune_checkpoint_type): pass def loss(self, prediction_dict, true_image_shapes): pass def regularization_losses(self): pass def updates(self): pass @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ExportTfliteGraphTest(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_path, use_moving_averages, quantize=False, num_channels=3): g = tf.Graph() with g.as_default(): mock_model = FakeModel() inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, num_channels]) mock_model.predict(inputs, true_image_shapes=None) if use_moving_averages: tf.train.ExponentialMovingAverage(0.0).apply() tf.train.get_or_create_global_step() if quantize: graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() saver = tf.train.Saver() init = tf.global_variables_initializer() with self.test_session() as sess: 
sess.run(init) saver.save(sess, checkpoint_path) def _assert_quant_vars_exists(self, tflite_graph_file): with tf.gfile.Open(tflite_graph_file, mode='rb') as f: graph_string = f.read() print(graph_string) self.assertIn(six.ensure_binary('quant'), graph_string) def _import_graph_and_run_inference(self, tflite_graph_file, num_channels=3): """Imports a tflite graph, runs single inference and returns outputs.""" graph = tf.Graph() with graph.as_default(): graph_def = tf.GraphDef() with tf.gfile.Open(tflite_graph_file, mode='rb') as f: graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def, name='') input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0') box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0') class_predictions = graph.get_tensor_by_name( 'raw_outputs/class_predictions:0') with self.test_session(graph) as sess: [box_encodings_np, class_predictions_np] = sess.run( [box_encodings, class_predictions], feed_dict={input_tensor: np.random.rand(1, 10, 10, num_channels)}) return box_encodings_np, class_predictions_np def _export_graph(self, pipeline_config, num_channels=3, additional_output_tensors=()): """Exports a tflite graph.""" output_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') quantize = pipeline_config.HasField('graph_rewriter') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=pipeline_config.eval_config.use_moving_averages, quantize=quantize, num_channels=num_channels) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() with tf.Graph().as_default(): tf.identity( tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') export_tflite_ssd_graph_lib.export_tflite_graph( pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_dir=output_dir, add_postprocessing_op=False, max_detections=10, max_classes_per_detection=1, additional_output_tensors=additional_output_tensors) return tflite_graph_file def _export_graph_with_postprocessing_op(self, pipeline_config, num_channels=3, additional_output_tensors=()): """Exports a tflite graph with custom postprocessing op.""" output_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(output_dir, 'model.ckpt') tflite_graph_file = os.path.join(output_dir, 'tflite_graph.pb') quantize = pipeline_config.HasField('graph_rewriter') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=pipeline_config.eval_config.use_moving_averages, quantize=quantize, num_channels=num_channels) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() with tf.Graph().as_default(): tf.identity( tf.constant([[1, 2], [3, 4]], tf.uint8), name='UnattachedTensor') export_tflite_ssd_graph_lib.export_tflite_graph( pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_dir=output_dir, add_postprocessing_op=True, max_detections=10, max_classes_per_detection=1, additional_output_tensors=additional_output_tensors) return tflite_graph_file def test_export_tflite_graph_with_moving_averages(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = True pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 
pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) (box_encodings_np, class_predictions_np ) = self._import_graph_and_run_inference(tflite_graph_file) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) def test_export_tflite_graph_without_moving_averages(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) (box_encodings_np, class_predictions_np ) = self._import_graph_and_run_inference(tflite_graph_file) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) def test_export_tflite_graph_grayscale(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 (pipeline_config.model.ssd.image_resizer.fixed_shape_resizer ).convert_to_grayscale = True pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config, num_channels=1) self.assertTrue(os.path.exists(tflite_graph_file)) (box_encodings_np, class_predictions_np) = self._import_graph_and_run_inference( tflite_graph_file, num_channels=1) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) def test_export_tflite_graph_with_quantization(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.graph_rewriter.quantization.delay = 500000 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 
5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) self._assert_quant_vars_exists(tflite_graph_file) (box_encodings_np, class_predictions_np ) = self._import_graph_and_run_inference(tflite_graph_file) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.7, 0.6], [0.9, 0.0]]]) def test_export_tflite_graph_with_softmax_score_conversion(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.post_processing.score_converter = ( post_processing_pb2.PostProcessing.SOFTMAX) pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) (box_encodings_np, class_predictions_np ) = self._import_graph_and_run_inference(tflite_graph_file) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.524979, 0.475021], [0.710949, 0.28905]]]) def test_export_tflite_graph_with_sigmoid_score_conversion(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.post_processing.score_converter = ( post_processing_pb2.PostProcessing.SIGMOID) pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 tflite_graph_file = self._export_graph(pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) (box_encodings_np, class_predictions_np ) = self._import_graph_and_run_inference(tflite_graph_file) self.assertAllClose(box_encodings_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]]) self.assertAllClose(class_predictions_np, [[[0.668188, 0.645656], [0.710949, 0.5]]]) def test_export_tflite_graph_with_postprocessing_op(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.post_processing.score_converter = ( post_processing_pb2.PostProcessing.SIGMOID) pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 
tflite_graph_file = self._export_graph_with_postprocessing_op( pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) graph = tf.Graph() with graph.as_default(): graph_def = tf.GraphDef() with tf.gfile.Open(tflite_graph_file, mode='rb') as f: graph_def.ParseFromString(f.read()) all_op_names = [node.name for node in graph_def.node] self.assertIn('TFLite_Detection_PostProcess', all_op_names) self.assertNotIn('UnattachedTensor', all_op_names) for node in graph_def.node: if node.name == 'TFLite_Detection_PostProcess': self.assertTrue(node.attr['_output_quantized'].b) self.assertTrue( node.attr['_support_output_type_float_in_quantized_op'].b) self.assertEqual(node.attr['y_scale'].f, 10.0) self.assertEqual(node.attr['x_scale'].f, 10.0) self.assertEqual(node.attr['h_scale'].f, 5.0) self.assertEqual(node.attr['w_scale'].f, 5.0) self.assertEqual(node.attr['num_classes'].i, 2) self.assertTrue( all([ t == types_pb2.DT_FLOAT for t in node.attr['_output_types'].list.type ])) def test_export_tflite_graph_with_additional_tensors(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 tflite_graph_file = self._export_graph( pipeline_config, additional_output_tensors=['UnattachedTensor']) self.assertTrue(os.path.exists(tflite_graph_file)) graph = tf.Graph() with graph.as_default(): graph_def = tf.GraphDef() with tf.gfile.Open(tflite_graph_file, mode='rb') as f: graph_def.ParseFromString(f.read()) all_op_names = [node.name for node in graph_def.node] self.assertIn('UnattachedTensor', all_op_names) def test_export_tflite_graph_with_postprocess_op_and_additional_tensors(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False pipeline_config.model.ssd.post_processing.score_converter = ( post_processing_pb2.PostProcessing.SIGMOID) pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 tflite_graph_file = self._export_graph_with_postprocessing_op( pipeline_config, additional_output_tensors=['UnattachedTensor']) self.assertTrue(os.path.exists(tflite_graph_file)) graph = tf.Graph() with graph.as_default(): graph_def = tf.GraphDef() with tf.gfile.Open(tflite_graph_file, mode='rb') as f: graph_def.ParseFromString(f.read()) all_op_names = [node.name for node in graph_def.node] self.assertIn('TFLite_Detection_PostProcess', all_op_names) self.assertIn('UnattachedTensor', all_op_names) @mock.patch.object(exporter, 'rewrite_nn_resize_op') def test_export_with_nn_resize_op_not_called_without_fpn(self, mock_get): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 tflite_graph_file = self._export_graph_with_postprocessing_op( pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) mock_get.assert_not_called() @mock.patch.object(exporter, 'rewrite_nn_resize_op') def test_export_with_nn_resize_op_called_with_fpn(self, mock_get): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 
pipeline_config.model.ssd.feature_extractor.fpn.min_level = 3 pipeline_config.model.ssd.feature_extractor.fpn.max_level = 7 tflite_graph_file = self._export_graph_with_postprocessing_op( pipeline_config) self.assertTrue(os.path.exists(tflite_graph_file)) self.assertEqual(1, mock_get.call_count) if __name__ == '__main__': tf.test.main()
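The _import_graph_and_run_inference helper above doubles as a reference for consuming the exported tflite_graph.pb outside of tests: the graph exposes 'normalized_input_image_tensor' as its input and 'raw_outputs/box_encodings' / 'raw_outputs/class_predictions' as its outputs. A standalone sketch of the same pattern, where the graph path and the 1x10x10x3 input shape are assumptions taken from the test configuration:

import numpy as np
import tensorflow.compat.v1 as tf

graph = tf.Graph()
with graph.as_default():
  graph_def = tf.GraphDef()
  # Hypothetical path to a graph written by export_tflite_graph.
  with tf.gfile.Open('/tmp/tflite_graph.pb', mode='rb') as f:
    graph_def.ParseFromString(f.read())
  tf.import_graph_def(graph_def, name='')

input_tensor = graph.get_tensor_by_name('normalized_input_image_tensor:0')
box_encodings = graph.get_tensor_by_name('raw_outputs/box_encodings:0')
class_predictions = graph.get_tensor_by_name('raw_outputs/class_predictions:0')
with tf.Session(graph=graph) as sess:
  # Raw box encodings and per-anchor class scores; no NMS has been applied.
  boxes_np, scores_np = sess.run(
      [box_encodings, class_predictions],
      feed_dict={input_tensor: np.random.rand(1, 10, 10, 3)})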
20,390
46.866197
80
py
models
models-master/research/object_detection/model_main_tf2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs TF2 object detection models.

For local training/evaluation run:
PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
NUM_TRAIN_STEPS=10000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
python model_main_tf2.py -- \
  --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \
  --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \
  --pipeline_config_path=$PIPELINE_CONFIG_PATH \
  --alsologtostderr
"""
from absl import flags
import tensorflow.compat.v2 as tf
from object_detection import model_lib_v2

flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                    'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train '
                  'data (only supported in distributed training).')
flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of '
                     'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
                     'one of every n train input examples for evaluation, '
                     'where n is provided. This is only used if '
                     '`eval_training_data` is True.')
flags.DEFINE_string(
    'model_dir', None, 'Path to output model directory '
    'where event and checkpoint files will be written.')
flags.DEFINE_string(
    'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
    '`checkpoint_dir` is provided, this binary operates in eval-only mode, '
    'writing resulting metrics to `model_dir`.')
flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an'
                     'evaluation checkpoint before exiting.')

flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.')
flags.DEFINE_string(
    'tpu_name',
    default=None,
    help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer(
    'num_workers', 1, 'When num_workers > 1, training uses '
    'MultiWorkerMirroredStrategy. When num_workers = 1 it uses '
    'MirroredStrategy.')
flags.DEFINE_integer(
    'checkpoint_every_n', 1000, 'Integer defining how often we checkpoint.')
flags.DEFINE_boolean('record_summaries', True,
                     ('Whether or not to record summaries defined by the model'
                      ' or the training pipeline. This does not impact the'
                      ' summaries of the loss values which are always'
                      ' recorded.'))

FLAGS = flags.FLAGS


def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')
  tf.config.set_soft_device_placement(True)

  if FLAGS.checkpoint_dir:
    model_lib_v2.eval_continuously(
        pipeline_config_path=FLAGS.pipeline_config_path,
        model_dir=FLAGS.model_dir,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples),
        checkpoint_dir=FLAGS.checkpoint_dir,
        wait_interval=300, timeout=FLAGS.eval_timeout)
  else:
    if FLAGS.use_tpu:
      # TPU is automatically inferred if tpu_name is None and
      # we are running under cloud ai-platform.
      resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
          FLAGS.tpu_name)
      tf.config.experimental_connect_to_cluster(resolver)
      tf.tpu.experimental.initialize_tpu_system(resolver)
      strategy = tf.distribute.experimental.TPUStrategy(resolver)
    elif FLAGS.num_workers > 1:
      strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    else:
      strategy = tf.compat.v2.distribute.MirroredStrategy()

    with strategy.scope():
      model_lib_v2.train_loop(
          pipeline_config_path=FLAGS.pipeline_config_path,
          model_dir=FLAGS.model_dir,
          train_steps=FLAGS.num_train_steps,
          use_tpu=FLAGS.use_tpu,
          checkpoint_every_n=FLAGS.checkpoint_every_n,
          record_summaries=FLAGS.record_summaries)

if __name__ == '__main__':
  tf.compat.v1.app.run()
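The docstring above shows the local training invocation; when --checkpoint_dir is set, the same binary runs in eval-only mode and writes the resulting metrics to --model_dir. A typical evaluation command, with all paths as placeholders, looks like:

PIPELINE_CONFIG_PATH=path/to/pipeline.config
MODEL_DIR=/tmp/model_outputs
CHECKPOINT_DIR=/tmp/model_outputs
python model_main_tf2.py -- \
  --pipeline_config_path=$PIPELINE_CONFIG_PATH \
  --model_dir=$MODEL_DIR \
  --checkpoint_dir=$CHECKPOINT_DIR \
  --alsologtostderr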
4,918
41.773913
80
py
models
models-master/research/object_detection/export_tflite_ssd_graph_lib.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Exports an SSD detection model to use with tf-lite. See export_tflite_ssd_graph.py for usage. """ import os import tempfile import numpy as np import tensorflow.compat.v1 as tf from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import saver_pb2 from object_detection import exporter from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.builders import post_processing_builder from object_detection.core import box_list from object_detection.utils import tf_version _DEFAULT_NUM_CHANNELS = 3 _DEFAULT_NUM_COORD_BOX = 4 if tf_version.is_tf1(): from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=g-import-not-at-top def get_const_center_size_encoded_anchors(anchors): """Exports center-size encoded anchors as a constant tensor. Args: anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor boxes Returns: encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] containing the anchor boxes. """ anchor_boxlist = box_list.BoxList(anchors) y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() num_anchors = y.get_shape().as_list() with tf.Session() as sess: y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) encoded_anchors = tf.constant( np.transpose(np.stack((y_out, x_out, h_out, w_out))), dtype=tf.float32, shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], name='anchors') return encoded_anchors def append_postprocessing_op(frozen_graph_def, max_detections, max_classes_per_detection, nms_score_threshold, nms_iou_threshold, num_classes, scale_values, detections_per_class=100, use_regular_nms=False, additional_output_tensors=()): """Appends postprocessing custom op. Args: frozen_graph_def: Frozen GraphDef for SSD model after freezing the checkpoint max_detections: Maximum number of detections (boxes) to show max_classes_per_detection: Number of classes to display per detection nms_score_threshold: Score threshold used in Non-maximal suppression in post-processing nms_iou_threshold: Intersection-over-union threshold used in Non-maximal suppression in post-processing num_classes: number of classes in SSD detector scale_values: scale values is a dict with following key-value pairs {y_scale: 10, x_scale: 10, h_scale: 5, w_scale: 5} that are used in decode centersize boxes detections_per_class: In regular NonMaxSuppression, number of anchors used for NonMaxSuppression per class use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of Fast NMS. additional_output_tensors: Array of additional tensor names to output. Tensors are appended after postprocessing output. 
Returns: transformed_graph_def: Frozen GraphDef with postprocessing custom op appended TFLite_Detection_PostProcess custom op node has four outputs: detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box locations detection_classes: a float32 tensor of shape [1, num_boxes] with class indices detection_scores: a float32 tensor of shape [1, num_boxes] with class scores num_boxes: a float32 tensor of size 1 containing the number of detected boxes """ new_output = frozen_graph_def.node.add() new_output.op = 'TFLite_Detection_PostProcess' new_output.name = 'TFLite_Detection_PostProcess' new_output.attr['_output_quantized'].CopyFrom( attr_value_pb2.AttrValue(b=True)) new_output.attr['_output_types'].list.type.extend([ types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT, types_pb2.DT_FLOAT ]) new_output.attr['_support_output_type_float_in_quantized_op'].CopyFrom( attr_value_pb2.AttrValue(b=True)) new_output.attr['max_detections'].CopyFrom( attr_value_pb2.AttrValue(i=max_detections)) new_output.attr['max_classes_per_detection'].CopyFrom( attr_value_pb2.AttrValue(i=max_classes_per_detection)) new_output.attr['nms_score_threshold'].CopyFrom( attr_value_pb2.AttrValue(f=nms_score_threshold.pop())) new_output.attr['nms_iou_threshold'].CopyFrom( attr_value_pb2.AttrValue(f=nms_iou_threshold.pop())) new_output.attr['num_classes'].CopyFrom( attr_value_pb2.AttrValue(i=num_classes)) new_output.attr['y_scale'].CopyFrom( attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop())) new_output.attr['x_scale'].CopyFrom( attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop())) new_output.attr['h_scale'].CopyFrom( attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop())) new_output.attr['w_scale'].CopyFrom( attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop())) new_output.attr['detections_per_class'].CopyFrom( attr_value_pb2.AttrValue(i=detections_per_class)) new_output.attr['use_regular_nms'].CopyFrom( attr_value_pb2.AttrValue(b=use_regular_nms)) new_output.input.extend( ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors']) # Transform the graph to append new postprocessing op input_names = [] output_names = ['TFLite_Detection_PostProcess' ] + list(additional_output_tensors) transforms = ['strip_unused_nodes'] transformed_graph_def = TransformGraph(frozen_graph_def, input_names, output_names, transforms) return transformed_graph_def def export_tflite_graph(pipeline_config, trained_checkpoint_prefix, output_dir, add_postprocessing_op, max_detections, max_classes_per_detection, detections_per_class=100, use_regular_nms=False, binary_graph_name='tflite_graph.pb', txt_graph_name='tflite_graph.pbtxt', additional_output_tensors=()): """Exports a tflite compatible graph and anchors for ssd detection model. Anchors are written to a tensor and tflite compatible graph is written to output_dir/tflite_graph.pb. Args: pipeline_config: a pipeline.proto object containing the configuration for SSD model to export. trained_checkpoint_prefix: a file prefix for the checkpoint containing the trained parameters of the SSD model. output_dir: A directory to write the tflite graph and anchor file to. 
add_postprocessing_op: If add_postprocessing_op is true: frozen graph adds a TFLite_Detection_PostProcess custom op max_detections: Maximum number of detections (boxes) to show max_classes_per_detection: Number of classes to display per detection detections_per_class: In regular NonMaxSuppression, number of anchors used for NonMaxSuppression per class use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of Fast NMS. binary_graph_name: Name of the exported graph file in binary format. txt_graph_name: Name of the exported graph file in text format. additional_output_tensors: Array of additional tensor names to output. Additional tensors are appended to the end of output tensor list. Raises: ValueError: if the pipeline config contains models other than ssd or uses an fixed_shape_resizer and provides a shape as well. """ tf.gfile.MakeDirs(output_dir) if pipeline_config.model.WhichOneof('model') != 'ssd': raise ValueError('Only ssd models are supported in tflite. ' 'Found {} in config'.format( pipeline_config.model.WhichOneof('model'))) num_classes = pipeline_config.model.ssd.num_classes nms_score_threshold = { pipeline_config.model.ssd.post_processing.batch_non_max_suppression .score_threshold } nms_iou_threshold = { pipeline_config.model.ssd.post_processing.batch_non_max_suppression .iou_threshold } scale_values = {} scale_values['y_scale'] = { pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale } scale_values['x_scale'] = { pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale } scale_values['h_scale'] = { pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale } scale_values['w_scale'] = { pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale } image_resizer_config = pipeline_config.model.ssd.image_resizer image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') num_channels = _DEFAULT_NUM_CHANNELS if image_resizer == 'fixed_shape_resizer': height = image_resizer_config.fixed_shape_resizer.height width = image_resizer_config.fixed_shape_resizer.width if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: num_channels = 1 shape = [1, height, width, num_channels] else: raise ValueError( 'Only fixed_shape_resizer' 'is supported with tflite. Found {}'.format( image_resizer_config.WhichOneof('image_resizer_oneof'))) image = tf.placeholder( tf.float32, shape=shape, name='normalized_input_image_tensor') detection_model = model_builder.build( pipeline_config.model, is_training=False) predicted_tensors = detection_model.predict(image, true_image_shapes=None) # The score conversion occurs before the post-processing custom op _, score_conversion_fn = post_processing_builder.build( pipeline_config.model.ssd.post_processing) class_predictions = score_conversion_fn( predicted_tensors['class_predictions_with_background']) with tf.name_scope('raw_outputs'): # 'raw_outputs/box_encodings': a float32 tensor of shape [1, num_anchors, 4] # containing the encoded box predictions. Note that these are raw # predictions and no Non-Max suppression is applied on them and # no decode center size boxes is applied to them. tf.identity(predicted_tensors['box_encodings'], name='box_encodings') # 'raw_outputs/class_predictions': a float32 tensor of shape # [1, num_anchors, num_classes] containing the class scores for each anchor # after applying score conversion. tf.identity(class_predictions, name='class_predictions') # 'anchors': a float32 tensor of shape # [4, num_anchors] containing the anchors as a constant node. 
tf.identity( get_const_center_size_encoded_anchors(predicted_tensors['anchors']), name='anchors') # Add global step to the graph, so we know the training step number when we # evaluate the model. tf.train.get_or_create_global_step() # graph rewriter is_quantized = pipeline_config.HasField('graph_rewriter') if is_quantized: graph_rewriter_config = pipeline_config.graph_rewriter graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() if pipeline_config.model.ssd.feature_extractor.HasField('fpn'): exporter.rewrite_nn_resize_op(is_quantized) # freeze the graph saver_kwargs = {} if pipeline_config.eval_config.use_moving_averages: saver_kwargs['write_version'] = saver_pb2.SaverDef.V1 moving_average_checkpoint = tempfile.NamedTemporaryFile() exporter.replace_variable_values_with_moving_averages( tf.get_default_graph(), trained_checkpoint_prefix, moving_average_checkpoint.name) checkpoint_to_use = moving_average_checkpoint.name else: checkpoint_to_use = trained_checkpoint_prefix saver = tf.train.Saver(**saver_kwargs) input_saver_def = saver.as_saver_def() frozen_graph_def = exporter.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=checkpoint_to_use, output_node_names=','.join([ 'raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors' ] + list(additional_output_tensors)), restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', clear_devices=True, output_graph='', initializer_nodes='') # Add new operation to do post processing in a custom op (TF Lite only) if add_postprocessing_op: transformed_graph_def = append_postprocessing_op( frozen_graph_def, max_detections, max_classes_per_detection, nms_score_threshold, nms_iou_threshold, num_classes, scale_values, detections_per_class, use_regular_nms, additional_output_tensors=additional_output_tensors) else: # Return frozen without adding post-processing custom op transformed_graph_def = frozen_graph_def binary_graph = os.path.join(output_dir, binary_graph_name) with tf.gfile.GFile(binary_graph, 'wb') as f: f.write(transformed_graph_def.SerializeToString()) txt_graph = os.path.join(output_dir, txt_graph_name) with tf.gfile.GFile(txt_graph, 'w') as f: f.write(str(transformed_graph_def))
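A minimal end-to-end sketch of driving this module: read an SSD pipeline config from disk, then call export_tflite_graph, which writes tflite_graph.pb and tflite_graph.pbtxt into output_dir. The config and checkpoint paths below are placeholders; the remaining arguments mirror the values used in the accompanying tests.

from google.protobuf import text_format
import tensorflow.compat.v1 as tf

from object_detection import export_tflite_ssd_graph_lib
from object_detection.protos import pipeline_pb2

# Parse a text-format TrainEvalPipelineConfig describing an SSD model.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile('/path/to/ssd_pipeline.config', 'r') as f:
  text_format.Merge(f.read(), pipeline_config)

# Export a TFLite-compatible frozen graph with the custom postprocessing op.
export_tflite_ssd_graph_lib.export_tflite_graph(
    pipeline_config=pipeline_config,
    trained_checkpoint_prefix='/path/to/model.ckpt',
    output_dir='/tmp/tflite_export',
    add_postprocessing_op=True,
    max_detections=10,
    max_classes_per_detection=1)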
14,085
41.173653
101
py
models
models-master/research/object_detection/model_lib_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object detection model library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import unittest import numpy as np import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from object_detection import inputs from object_detection import model_hparams from object_detection import model_lib from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.utils import config_util from object_detection.utils import tf_version # Model for test. Options are: # 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets' MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets' # Model for testing keypoints. MODEL_NAME_FOR_KEYPOINTS_TEST = 'ssd_mobilenet_v1_fpp' # Model for testing tfSequenceExample inputs. MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST = 'context_rcnn_camera_trap' def _get_data_path(model_name): """Returns an absolute path to TFRecord file.""" if model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', 'snapshot_serengeti_sequence_examples.record') else: return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', 'pets_examples.record') def get_pipeline_config_path(model_name): """Returns path to the local pipeline config file.""" if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', model_name + '.config') elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', model_name + '.config') else: return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', 'configs', model_name + '.config') def _get_labelmap_path(): """Returns an absolute path to label map file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'pet_label_map.pbtxt') def _get_keypoints_labelmap_path(): """Returns an absolute path to label map file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'face_person_with_keypoints_label_map.pbtxt') def _get_sequence_example_labelmap_path(): """Returns an absolute path to label map file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'snapshot_serengeti_label_map.pbtxt') def _get_configs_for_model(model_name): """Returns configurations for model.""" filename = get_pipeline_config_path(model_name) data_path = _get_data_path(model_name) if model_name == MODEL_NAME_FOR_KEYPOINTS_TEST: label_map_path = _get_keypoints_labelmap_path() elif model_name == MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST: label_map_path = _get_sequence_example_labelmap_path() else: label_map_path = _get_labelmap_path() configs = 
config_util.get_configs_from_pipeline_file(filename) override_dict = { 'train_input_path': data_path, 'eval_input_path': data_path, 'label_map_path': label_map_path } configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) return configs def _make_initializable_iterator(dataset): """Creates an iterator, and initializes tables. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.data.Iterator`. """ iterator = tf.data.make_initializable_iterator(dataset) tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) return iterator @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class ModelLibTest(tf.test.TestCase): @classmethod def setUpClass(cls): tf.reset_default_graph() def _assert_model_fn_for_train_eval(self, configs, mode, class_agnostic=False): model_config = configs['model'] train_config = configs['train_config'] with tf.Graph().as_default(): if mode == 'train': features, labels = _make_initializable_iterator( inputs.create_train_input_fn(configs['train_config'], configs['train_input_config'], configs['model'])()).get_next() model_mode = tf_estimator.ModeKeys.TRAIN batch_size = train_config.batch_size elif mode == 'eval': features, labels = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['eval_input_config'], configs['model'])()).get_next() model_mode = tf_estimator.ModeKeys.EVAL batch_size = 1 elif mode == 'eval_on_train': features, labels = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['train_input_config'], configs['model'])()).get_next() model_mode = tf_estimator.ModeKeys.EVAL batch_size = 1 detection_model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=True) hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) estimator_spec = model_fn(features, labels, model_mode) self.assertIsNotNone(estimator_spec.loss) self.assertIsNotNone(estimator_spec.predictions) if mode == 'eval' or mode == 'eval_on_train': if class_agnostic: self.assertNotIn('detection_classes', estimator_spec.predictions) else: detection_classes = estimator_spec.predictions['detection_classes'] self.assertEqual(batch_size, detection_classes.shape.as_list()[0]) self.assertEqual(tf.float32, detection_classes.dtype) detection_boxes = estimator_spec.predictions['detection_boxes'] detection_scores = estimator_spec.predictions['detection_scores'] num_detections = estimator_spec.predictions['num_detections'] self.assertEqual(batch_size, detection_boxes.shape.as_list()[0]) self.assertEqual(tf.float32, detection_boxes.dtype) self.assertEqual(batch_size, detection_scores.shape.as_list()[0]) self.assertEqual(tf.float32, detection_scores.dtype) self.assertEqual(tf.float32, num_detections.dtype) if mode == 'eval': self.assertIn('Detections_Left_Groundtruth_Right/0', estimator_spec.eval_metric_ops) if model_mode == tf_estimator.ModeKeys.TRAIN: self.assertIsNotNone(estimator_spec.train_op) return estimator_spec def _assert_model_fn_for_predict(self, configs): model_config = configs['model'] with tf.Graph().as_default(): features, _ = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['eval_input_config'], configs['model'])()).get_next() detection_model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=False) hparams = model_hparams.create_hparams( 
hparams_overrides='load_pretrained=false') model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) estimator_spec = model_fn(features, None, tf_estimator.ModeKeys.PREDICT) self.assertIsNone(estimator_spec.loss) self.assertIsNone(estimator_spec.train_op) self.assertIsNotNone(estimator_spec.predictions) self.assertIsNotNone(estimator_spec.export_outputs) self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME, estimator_spec.export_outputs) def test_model_fn_in_train_mode(self): """Tests the model function in TRAIN mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_sequences(self): """Tests the model function in TRAIN mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_all_variables(self): """Tests model_fn TRAIN mode with all variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) configs['train_config'].freeze_variables.append('.*') with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_all_included_variables(self): """Tests model_fn TRAIN mode with all included variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) train_config = configs['train_config'] train_config.update_trainable_variables.append('FeatureExtractor') train_config.freeze_variables.append('.*') with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_box_predictor(self): """Tests model_fn TRAIN mode with FeatureExtractor variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) train_config = configs['train_config'] train_config.update_trainable_variables.append('FeatureExtractor') train_config.update_trainable_variables.append('BoxPredictor') train_config.freeze_variables.append('FeatureExtractor') self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_eval_mode(self): """Tests the model function in EVAL mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'eval') def test_model_fn_in_eval_mode_sequences(self): """Tests the model function in EVAL mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) self._assert_model_fn_for_train_eval(configs, 'eval') def test_model_fn_in_keypoints_eval_mode(self): """Tests the model function in EVAL mode with keypoints config.""" configs = _get_configs_for_model(MODEL_NAME_FOR_KEYPOINTS_TEST) estimator_spec = self._assert_model_fn_for_train_eval(configs, 'eval') metric_ops = estimator_spec.eval_metric_ops self.assertIn('Keypoints_Precision/mAP ByCategory/face', metric_ops) self.assertIn('Keypoints_Precision/mAP ByCategory/PERSON', metric_ops) detection_keypoints = estimator_spec.predictions['detection_keypoints'] self.assertEqual(1, detection_keypoints.shape.as_list()[0]) self.assertEqual(tf.float32, detection_keypoints.dtype) def test_model_fn_in_eval_on_train_mode(self): """Tests the model function in EVAL mode with train data.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'eval_on_train') def test_model_fn_in_predict_mode(self): """Tests the model function in PREDICT mode.""" configs = 
_get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_predict(configs) def test_create_estimator_and_inputs(self): """Tests that Estimator and input function are constructed correctly.""" run_config = tf_estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf_estimator.Estimator) self.assertEqual(20, train_steps) self.assertIn('train_input_fn', train_and_eval_dict) self.assertIn('eval_input_fns', train_and_eval_dict) self.assertIn('eval_on_train_input_fn', train_and_eval_dict) def test_create_estimator_and_inputs_sequence_example(self): """Tests that Estimator and input function are constructed correctly.""" run_config = tf_estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path( MODEL_NAME_FOR_SEQUENCE_EXAMPLE_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf_estimator.Estimator) self.assertEqual(20, train_steps) self.assertIn('train_input_fn', train_and_eval_dict) self.assertIn('eval_input_fns', train_and_eval_dict) self.assertIn('eval_on_train_input_fn', train_and_eval_dict) def test_create_estimator_with_default_train_eval_steps(self): """Tests that number of train/eval defaults to config values.""" run_config = tf_estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) config_train_steps = configs['train_config'].num_steps train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf_estimator.Estimator) self.assertEqual(config_train_steps, train_steps) def test_create_tpu_estimator_and_inputs(self): """Tests that number of train/eval defaults to config values.""" run_config = tf_estimator.tpu.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps, use_tpu_estimator=True) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf_estimator.tpu.TPUEstimator) self.assertEqual(20, train_steps) def test_create_train_and_eval_specs(self): """Tests that `TrainSpec` and `EvalSpec` is created correctly.""" run_config = tf_estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, 
pipeline_config_path, train_steps=train_steps) train_input_fn = train_and_eval_dict['train_input_fn'] eval_input_fns = train_and_eval_dict['eval_input_fns'] eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] predict_input_fn = train_and_eval_dict['predict_input_fn'] train_steps = train_and_eval_dict['train_steps'] train_spec, eval_specs = model_lib.create_train_and_eval_specs( train_input_fn, eval_input_fns, eval_on_train_input_fn, predict_input_fn, train_steps, eval_on_train_data=True, final_exporter_name='exporter', eval_spec_names=['holdout']) self.assertEqual(train_steps, train_spec.max_steps) self.assertEqual(2, len(eval_specs)) self.assertEqual(None, eval_specs[0].steps) self.assertEqual('holdout', eval_specs[0].name) self.assertEqual('exporter', eval_specs[0].exporters[0].name) self.assertEqual(None, eval_specs[1].steps) self.assertEqual('eval_on_train', eval_specs[1].name) def test_experiment(self): """Tests that the `Experiment` object is constructed correctly.""" run_config = tf_estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) experiment = model_lib.populate_experiment( run_config, hparams, pipeline_config_path, train_steps=10, eval_steps=20) self.assertEqual(10, experiment.train_steps) self.assertEqual(None, experiment.eval_steps) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class UnbatchTensorsTest(tf.test.TestCase): def test_unbatch_without_unpadding(self): image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, None, None]) groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, None, None]) groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, None]) tensor_dict = { fields.InputDataFields.image: image_placeholder, fields.InputDataFields.groundtruth_boxes: groundtruth_boxes_placeholder, fields.InputDataFields.groundtruth_classes: groundtruth_classes_placeholder, fields.InputDataFields.groundtruth_weights: groundtruth_weights_placeholder } unbatched_tensor_dict = model_lib.unstack_batch( tensor_dict, unpad_groundtruth_tensors=False) with self.test_session() as sess: unbatched_tensor_dict_out = sess.run( unbatched_tensor_dict, feed_dict={ image_placeholder: np.random.rand(2, 4, 4, 3).astype(np.float32), groundtruth_boxes_placeholder: np.random.rand(2, 5, 4).astype(np.float32), groundtruth_classes_placeholder: np.random.rand(2, 5, 6).astype(np.float32), groundtruth_weights_placeholder: np.random.rand(2, 5).astype(np.float32) }) for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: self.assertAllEqual(image_out.shape, [4, 4, 3]) for groundtruth_boxes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_boxes]: self.assertAllEqual(groundtruth_boxes_out.shape, [5, 4]) for groundtruth_classes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_classes]: self.assertAllEqual(groundtruth_classes_out.shape, [5, 6]) for groundtruth_weights_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_weights]: self.assertAllEqual(groundtruth_weights_out.shape, [5]) def test_unbatch_and_unpad_groundtruth_tensors(self): image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) groundtruth_weights_placeholder = 
tf.placeholder(tf.float32, [2, 5]) num_groundtruth_placeholder = tf.placeholder(tf.int32, [2]) tensor_dict = { fields.InputDataFields.image: image_placeholder, fields.InputDataFields.groundtruth_boxes: groundtruth_boxes_placeholder, fields.InputDataFields.groundtruth_classes: groundtruth_classes_placeholder, fields.InputDataFields.groundtruth_weights: groundtruth_weights_placeholder, fields.InputDataFields.num_groundtruth_boxes: num_groundtruth_placeholder } unbatched_tensor_dict = model_lib.unstack_batch( tensor_dict, unpad_groundtruth_tensors=True) with self.test_session() as sess: unbatched_tensor_dict_out = sess.run( unbatched_tensor_dict, feed_dict={ image_placeholder: np.random.rand(2, 4, 4, 3).astype(np.float32), groundtruth_boxes_placeholder: np.random.rand(2, 5, 4).astype(np.float32), groundtruth_classes_placeholder: np.random.rand(2, 5, 6).astype(np.float32), groundtruth_weights_placeholder: np.random.rand(2, 5).astype(np.float32), num_groundtruth_placeholder: np.array([3, 3], np.int32) }) for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: self.assertAllEqual(image_out.shape, [4, 4, 3]) for groundtruth_boxes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_boxes]: self.assertAllEqual(groundtruth_boxes_out.shape, [3, 4]) for groundtruth_classes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_classes]: self.assertAllEqual(groundtruth_classes_out.shape, [3, 6]) for groundtruth_weights_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_weights]: self.assertAllEqual(groundtruth_weights_out.shape, [3]) if __name__ == '__main__': tf.test.main()
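# Illustrative sketch (not part of the test file above): the helpers these
# tests exercise are the same ones a standalone training driver would wire
# together. The model_dir, pipeline config path, and step count below are
# placeholders, and the call pattern mirrors the tests rather than any
# particular binary in the repository.
from tensorflow.compat.v1 import estimator as tf_estimator

from object_detection import model_hparams
from object_detection import model_lib

run_config = tf_estimator.RunConfig(model_dir='/tmp/model_dir')
hparams = model_hparams.create_hparams()
train_and_eval_dict = model_lib.create_estimator_and_inputs(
    run_config, hparams, 'path/to/pipeline.config', train_steps=1000)
train_spec, eval_specs = model_lib.create_train_and_eval_specs(
    train_and_eval_dict['train_input_fn'],
    train_and_eval_dict['eval_input_fns'],
    train_and_eval_dict['eval_on_train_input_fn'],
    train_and_eval_dict['predict_input_fn'],
    train_and_eval_dict['train_steps'],
    eval_on_train_data=False)
tf_estimator.train_and_evaluate(train_and_eval_dict['estimator'], train_spec,
                                eval_specs[0])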
22,230
42.848126
80
py
models
models-master/research/object_detection/model_tpu_main.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs `Estimator` for object detection model on TPUs.

This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes.
"""
# pylint: enable=line-too-long

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator

from object_detection import model_lib

tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')

# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_name',
    default=None,
    help='Name of the Cloud TPU for Cluster Resolvers.')

flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop.')
# For mode=train_and_eval, evaluation occurs after training is finished.
# Note: independently of steps_per_checkpoint, estimator will save the most
# recent checkpoint every 10 minutes by default for train_and_eval
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train, eval')
flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If '
                     'this is not provided, batch size is read from training '
                     'config.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
                     'If training data should be evaluated for this job.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
                     'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
                     'one of every n train input examples for evaluation, '
                     'where n is provided. This is only used if '
                     '`eval_training_data` is True.')
flags.DEFINE_string(
    'model_dir', None, 'Path to output model directory '
    'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                    'file.')
flags.DEFINE_integer(
    'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
    'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
    'will always retry the evaluation.'
)

FLAGS = tf.flags.FLAGS


def main(unused_argv):
  flags.mark_flag_as_required('model_dir')
  flags.mark_flag_as_required('pipeline_config_path')

  tpu_cluster_resolver = (
      tf.distribute.cluster_resolver.TPUClusterResolver(
          tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
  tpu_grpc_url = tpu_cluster_resolver.get_master()

  config = tf_estimator.tpu.RunConfig(
      master=tpu_grpc_url,
      evaluation_master=tpu_grpc_url,
      model_dir=FLAGS.model_dir,
      tpu_config=tf_estimator.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_shards))

  kwargs = {}
  if FLAGS.train_batch_size:
    kwargs['batch_size'] = FLAGS.train_batch_size

  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config=config,
      pipeline_config_path=FLAGS.pipeline_config_path,
      train_steps=FLAGS.num_train_steps,
      sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
      sample_1_of_n_eval_on_train_examples=(
          FLAGS.sample_1_of_n_eval_on_train_examples),
      use_tpu_estimator=True,
      use_tpu=FLAGS.use_tpu,
      num_shards=FLAGS.num_shards,
      save_final_config=FLAGS.mode == 'train',
      **kwargs)
  estimator = train_and_eval_dict['estimator']
  train_input_fn = train_and_eval_dict['train_input_fn']
  eval_input_fns = train_and_eval_dict['eval_input_fns']
  eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
  train_steps = train_and_eval_dict['train_steps']

  if FLAGS.mode == 'train':
    estimator.train(input_fn=train_input_fn, max_steps=train_steps)

  # Continuously evaluating.
  if FLAGS.mode == 'eval':
    if FLAGS.eval_training_data:
      name = 'training_data'
      input_fn = eval_on_train_input_fn
    else:
      name = 'validation_data'
      # Currently only a single eval input is allowed.
      input_fn = eval_input_fns[0]

    model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
                              name, FLAGS.max_eval_retries)


if __name__ == '__main__':
  tf.app.run()
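# Illustrative usage of the script above; the bucket paths and TPU name are
# placeholders. A TPU training job and a separate continuous-eval job
# typically point at the same model_dir, using only the flags defined above:
#
#   python model_tpu_main.py --mode=train --use_tpu=true --tpu_name=my-tpu \
#       --model_dir=gs://my-bucket/model_dir \
#       --pipeline_config_path=gs://my-bucket/pipeline.config
#
#   python model_tpu_main.py --mode=eval --use_tpu=false \
#       --model_dir=gs://my-bucket/model_dir \
#       --pipeline_config_path=gs://my-bucket/pipeline.config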
5,648
39.06383
80
py
models
models-master/research/object_detection/export_tflite_graph_lib_tf2_test.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for export_tflite_graph_lib_tf2.py.""" from __future__ import division import os import unittest import six import tensorflow.compat.v2 as tf from object_detection import export_tflite_graph_lib_tf2 from object_detection.builders import model_builder from object_detection.core import model from object_detection.protos import pipeline_pb2 from object_detection.utils import tf_version from google.protobuf import text_format if six.PY2: import mock # pylint: disable=g-importing-member,g-import-not-at-top else: from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top class FakeModel(model.DetectionModel): def __init__(self): super(FakeModel, self).__init__(num_classes=2) self._conv = tf.keras.layers.Conv2D( filters=1, kernel_size=1, strides=(1, 1), padding='valid', kernel_initializer=tf.keras.initializers.Constant(value=1.0)) def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. return tf.identity(inputs), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): prediction_tensors = {'image': self._conv(preprocessed_inputs)} with tf.control_dependencies([prediction_tensors['image']]): prediction_tensors['box_encodings'] = tf.constant( [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]]], tf.float32) prediction_tensors['class_predictions_with_background'] = tf.constant( [[[0.7, 0.6], [0.9, 0.0]]], tf.float32) with tf.control_dependencies([ tf.convert_to_tensor( prediction_tensors['image'].get_shape().as_list()[1:3]) ]): prediction_tensors['anchors'] = tf.constant( [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32) return prediction_tensors def postprocess(self, prediction_dict, true_image_shapes): predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) with tf.control_dependencies(list(prediction_dict.values())): postprocessed_tensors = { 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]], tf.float32), 'detection_scores': predict_tensor_sum + tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32), 'detection_classes': tf.constant([[0, 1], [1, 0]], tf.float32), 'num_detections': tf.constant([2, 1], tf.float32), 'detection_keypoints': tf.zeros([2, 17, 2], tf.float32), 'detection_keypoint_scores': tf.zeros([2, 17], tf.float32), } return postprocessed_tensors def restore_map(self, checkpoint_path, from_detection_checkpoint): pass def restore_from_objects(self, fine_tune_checkpoint_type): pass def loss(self, prediction_dict, true_image_shapes): pass def regularization_losses(self): pass def updates(self): pass @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ExportTfLiteGraphTest(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_dir): mock_model = FakeModel() fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) 
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) mock_model.postprocess(predictions, true_image_shapes) ckpt = tf.train.Checkpoint(model=mock_model) exported_checkpoint_manager = tf.train.CheckpointManager( ckpt, checkpoint_dir, max_to_keep=1) exported_checkpoint_manager.save(checkpoint_number=0) def _get_ssd_config(self): pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.height = 10 pipeline_config.model.ssd.image_resizer.fixed_shape_resizer.width = 10 pipeline_config.model.ssd.num_classes = 2 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale = 10.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale = 5.0 pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale = 5.0 pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold = 0.5 return pipeline_config def _get_center_net_config(self): pipeline_config_text = """ model { center_net { num_classes: 1 feature_extractor { type: "mobilenet_v2_fpn" } image_resizer { fixed_shape_resizer { height: 10 width: 10 } } object_detection_task { localization_loss { l1_localization_loss { } } } object_center_params { classification_loss { } max_box_predictions: 20 } keypoint_estimation_task { loss { localization_loss { l1_localization_loss { } } classification_loss { penalty_reduced_logistic_focal_loss { } } } } } } """ return text_format.Parse( pipeline_config_text, pipeline_pb2.TrainEvalPipelineConfig()) # The tf.implements signature is important since it ensures MLIR legalization, # so we test it here. def test_postprocess_implements_signature(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) pipeline_config = self._get_ssd_config() with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() detection_model = model_builder.build( pipeline_config.model, is_training=False) ckpt = tf.train.Checkpoint(model=detection_model) manager = tf.train.CheckpointManager(ckpt, tmp_dir, max_to_keep=1) ckpt.restore(manager.latest_checkpoint).expect_partial() # The module helps build a TF graph appropriate for TFLite conversion. 
detection_module = export_tflite_graph_lib_tf2.SSDModule( pipeline_config=pipeline_config, detection_model=detection_model, max_detections=20, use_regular_nms=True) expected_signature = ('name: "TFLite_Detection_PostProcess" attr { key: ' '"max_detections" value { i: 20 } } attr { key: ' '"max_classes_per_detection" value { i: 1 } } attr ' '{ key: "use_regular_nms" value { b: true } } attr ' '{ key: "nms_score_threshold" value { f: 0.000000 }' ' } attr { key: "nms_iou_threshold" value { f: ' '0.500000 } } attr { key: "y_scale" value { f: ' '10.000000 } } attr { key: "x_scale" value { f: ' '10.000000 } } attr { key: "h_scale" value { f: ' '5.000000 } } attr { key: "w_scale" value { f: ' '5.000000 } } attr { key: "num_classes" value { i: ' '2 } }') self.assertEqual(expected_signature, detection_module.postprocess_implements_signature()) def test_unsupported_architecture(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 10 with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') expected_message = 'Only ssd or center_net models are supported in tflite' try: export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config=pipeline_config, trained_checkpoint_dir=tmp_dir, output_directory=output_directory, max_detections=10, use_regular_nms=False) except ValueError as e: if expected_message not in str(e): raise else: raise AssertionError('Exception not raised: %s' % expected_message) def test_export_yields_saved_model(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config=self._get_ssd_config(), trained_checkpoint_dir=tmp_dir, output_directory=output_directory, max_detections=10, use_regular_nms=False) self.assertTrue( os.path.exists( os.path.join(output_directory, 'saved_model', 'saved_model.pb'))) self.assertTrue( os.path.exists( os.path.join(output_directory, 'saved_model', 'variables', 'variables.index'))) self.assertTrue( os.path.exists( os.path.join(output_directory, 'saved_model', 'variables', 'variables.data-00000-of-00001'))) def test_exported_model_inference(self): tmp_dir = self.get_temp_dir() output_directory = os.path.join(tmp_dir, 'output') self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config=self._get_ssd_config(), trained_checkpoint_dir=tmp_dir, output_directory=output_directory, max_detections=10, use_regular_nms=False) saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) detect_fn_sig = detect_fn.signatures['serving_default'] image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) detections = detect_fn_sig(image) # The exported graph doesn't have numerically correct outputs, but there # should be 4. 
self.assertEqual(4, len(detections)) def test_center_net_inference_object_detection(self): tmp_dir = self.get_temp_dir() output_directory = os.path.join(tmp_dir, 'output') self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config=self._get_center_net_config(), trained_checkpoint_dir=tmp_dir, output_directory=output_directory, max_detections=10, use_regular_nms=False) saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) detect_fn_sig = detect_fn.signatures['serving_default'] image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) detections = detect_fn_sig(image) # The exported graph doesn't have numerically correct outputs, but there # should be 4. self.assertEqual(4, len(detections)) def test_center_net_inference_keypoint(self): tmp_dir = self.get_temp_dir() output_directory = os.path.join(tmp_dir, 'output') self._save_checkpoint_from_mock_model(tmp_dir) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() export_tflite_graph_lib_tf2.export_tflite_model( pipeline_config=self._get_center_net_config(), trained_checkpoint_dir=tmp_dir, output_directory=output_directory, max_detections=10, use_regular_nms=False, include_keypoints=True) saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) detect_fn_sig = detect_fn.signatures['serving_default'] image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) detections = detect_fn_sig(image) # The exported graph doesn't have numerically correct outputs, but there # should be 6 (4 for boxes, 2 for keypoints). self.assertEqual(6, len(detections)) if __name__ == '__main__': tf.test.main()
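# Illustrative sketch (not part of the test above): once the SavedModel
# produced by export_tflite_graph_lib_tf2 has been converted to a .tflite
# flatbuffer, it can be driven with the TFLite interpreter. The model path and
# the 1x10x10x3 input shape are placeholders matching the fake config used in
# these tests; the output ordering depends on the converted graph.
import numpy as np
import tensorflow.compat.v2 as tf

interpreter = tf.lite.Interpreter(model_path='/tmp/output/model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'],
                       np.zeros((1, 10, 10, 3), dtype=np.float32))
interpreter.invoke()
outputs = [interpreter.get_tensor(detail['index']) for detail in output_details]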
13,194
37.581871
91
py
models
models-master/research/object_detection/export_tflite_graph_lib_tf2.py
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library to export TFLite-compatible SavedModel from TF2 detection models.""" import os import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from object_detection.builders import model_builder from object_detection.builders import post_processing_builder from object_detection.core import box_list from object_detection.core import standard_fields as fields _DEFAULT_NUM_CHANNELS = 3 _DEFAULT_NUM_COORD_BOX = 4 _MAX_CLASSES_PER_DETECTION = 1 _DETECTION_POSTPROCESS_FUNC = 'TFLite_Detection_PostProcess' def get_const_center_size_encoded_anchors(anchors): """Exports center-size encoded anchors as a constant tensor. Args: anchors: a float32 tensor of shape [num_anchors, 4] containing the anchor boxes Returns: encoded_anchors: a float32 constant tensor of shape [num_anchors, 4] containing the anchor boxes. """ anchor_boxlist = box_list.BoxList(anchors) y, x, h, w = anchor_boxlist.get_center_coordinates_and_sizes() num_anchors = y.get_shape().as_list() with tf1.Session() as sess: y_out, x_out, h_out, w_out = sess.run([y, x, h, w]) encoded_anchors = tf1.constant( np.transpose(np.stack((y_out, x_out, h_out, w_out))), dtype=tf1.float32, shape=[num_anchors[0], _DEFAULT_NUM_COORD_BOX], name='anchors') return num_anchors[0], encoded_anchors class SSDModule(tf.Module): """Inference Module for TFLite-friendly SSD models.""" def __init__(self, pipeline_config, detection_model, max_detections, use_regular_nms): """Initialization. Args: pipeline_config: The original pipeline_pb2.TrainEvalPipelineConfig detection_model: The detection model to use for inference. max_detections: Max detections desired from the TFLite model. use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS. 
""" self._process_config(pipeline_config) self._pipeline_config = pipeline_config self._model = detection_model self._max_detections = max_detections self._use_regular_nms = use_regular_nms def _process_config(self, pipeline_config): self._num_classes = pipeline_config.model.ssd.num_classes self._nms_score_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.score_threshold self._nms_iou_threshold = pipeline_config.model.ssd.post_processing.batch_non_max_suppression.iou_threshold self._scale_values = {} self._scale_values[ 'y_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.y_scale self._scale_values[ 'x_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.x_scale self._scale_values[ 'h_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.height_scale self._scale_values[ 'w_scale'] = pipeline_config.model.ssd.box_coder.faster_rcnn_box_coder.width_scale image_resizer_config = pipeline_config.model.ssd.image_resizer image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') self._num_channels = _DEFAULT_NUM_CHANNELS if image_resizer == 'fixed_shape_resizer': self._height = image_resizer_config.fixed_shape_resizer.height self._width = image_resizer_config.fixed_shape_resizer.width if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: self._num_channels = 1 else: raise ValueError( 'Only fixed_shape_resizer' 'is supported with tflite. Found {}'.format( image_resizer_config.WhichOneof('image_resizer_oneof'))) def input_shape(self): """Returns shape of TFLite model input.""" return [1, self._height, self._width, self._num_channels] def postprocess_implements_signature(self): """Returns tf.implements signature for MLIR legalization of TFLite NMS.""" implements_signature = [ 'name: "%s"' % _DETECTION_POSTPROCESS_FUNC, 'attr { key: "max_detections" value { i: %d } }' % self._max_detections, 'attr { key: "max_classes_per_detection" value { i: %d } }' % _MAX_CLASSES_PER_DETECTION, 'attr { key: "use_regular_nms" value { b: %s } }' % str(self._use_regular_nms).lower(), 'attr { key: "nms_score_threshold" value { f: %f } }' % self._nms_score_threshold, 'attr { key: "nms_iou_threshold" value { f: %f } }' % self._nms_iou_threshold, 'attr { key: "y_scale" value { f: %f } }' % self._scale_values['y_scale'], 'attr { key: "x_scale" value { f: %f } }' % self._scale_values['x_scale'], 'attr { key: "h_scale" value { f: %f } }' % self._scale_values['h_scale'], 'attr { key: "w_scale" value { f: %f } }' % self._scale_values['w_scale'], 'attr { key: "num_classes" value { i: %d } }' % self._num_classes ] implements_signature = ' '.join(implements_signature) return implements_signature def _get_postprocess_fn(self, num_anchors, num_classes): # There is no TF equivalent for TFLite's custom post-processing op. # So we add an 'empty' composite function here, that is legalized to the # custom op with MLIR. @tf.function( experimental_implements=self.postprocess_implements_signature()) # pylint: disable=g-unused-argument,unused-argument def dummy_post_processing(box_encodings, class_predictions, anchors): boxes = tf.constant(0.0, dtype=tf.float32, name='boxes') scores = tf.constant(0.0, dtype=tf.float32, name='scores') classes = tf.constant(0.0, dtype=tf.float32, name='classes') num_detections = tf.constant(0.0, dtype=tf.float32, name='num_detections') return boxes, classes, scores, num_detections return dummy_post_processing @tf.function def inference_fn(self, image): """Encapsulates SSD inference for TFLite conversion. 
NOTE: The Args & Returns sections below indicate the TFLite model signature, and not what the TF graph does (since the latter does not include the custom NMS op used by TFLite) Args: image: a float32 tensor of shape [num_anchors, 4] containing the anchor boxes Returns: num_detections: a float32 scalar denoting number of total detections. classes: a float32 tensor denoting class ID for each detection. scores: a float32 tensor denoting score for each detection. boxes: a float32 tensor denoting coordinates of each detected box. """ predicted_tensors = self._model.predict(image, true_image_shapes=None) # The score conversion occurs before the post-processing custom op _, score_conversion_fn = post_processing_builder.build( self._pipeline_config.model.ssd.post_processing) class_predictions = score_conversion_fn( predicted_tensors['class_predictions_with_background']) with tf.name_scope('raw_outputs'): # 'raw_outputs/box_encodings': a float32 tensor of shape # [1, num_anchors, 4] containing the encoded box predictions. Note that # these are raw predictions and no Non-Max suppression is applied on # them and no decode center size boxes is applied to them. box_encodings = tf.identity( predicted_tensors['box_encodings'], name='box_encodings') # 'raw_outputs/class_predictions': a float32 tensor of shape # [1, num_anchors, num_classes] containing the class scores for each # anchor after applying score conversion. class_predictions = tf.identity( class_predictions, name='class_predictions') # 'anchors': a float32 tensor of shape # [4, num_anchors] containing the anchors as a constant node. num_anchors, anchors = get_const_center_size_encoded_anchors( predicted_tensors['anchors']) anchors = tf.identity(anchors, name='anchors') # tf.function@ seems to reverse order of inputs, so reverse them here. return self._get_postprocess_fn(num_anchors, self._num_classes)(box_encodings, class_predictions, anchors)[::-1] class CenterNetModule(tf.Module): """Inference Module for TFLite-friendly CenterNet models. The exported CenterNet model includes the preprocessing and postprocessing logics so the caller should pass in the raw image pixel values. It supports both object detection and keypoint estimation task. """ def __init__(self, pipeline_config, max_detections, include_keypoints, label_map_path=''): """Initialization. Args: pipeline_config: The original pipeline_pb2.TrainEvalPipelineConfig max_detections: Max detections desired from the TFLite model. include_keypoints: If set true, the output dictionary will include the keypoint coordinates and keypoint confidence scores. label_map_path: Path to the label map which is used by CenterNet keypoint estimation task. If provided, the label_map_path in the configuration will be replaced by this one. 
""" self._max_detections = max_detections self._include_keypoints = include_keypoints self._process_config(pipeline_config) if include_keypoints and label_map_path: pipeline_config.model.center_net.keypoint_label_map_path = label_map_path self._pipeline_config = pipeline_config self._model = model_builder.build( self._pipeline_config.model, is_training=False) def get_model(self): return self._model def _process_config(self, pipeline_config): self._num_classes = pipeline_config.model.center_net.num_classes center_net_config = pipeline_config.model.center_net image_resizer_config = center_net_config.image_resizer image_resizer = image_resizer_config.WhichOneof('image_resizer_oneof') self._num_channels = _DEFAULT_NUM_CHANNELS if image_resizer == 'fixed_shape_resizer': self._height = image_resizer_config.fixed_shape_resizer.height self._width = image_resizer_config.fixed_shape_resizer.width if image_resizer_config.fixed_shape_resizer.convert_to_grayscale: self._num_channels = 1 else: raise ValueError( 'Only fixed_shape_resizer' 'is supported with tflite. Found {}'.format(image_resizer)) center_net_config.object_center_params.max_box_predictions = ( self._max_detections) if not self._include_keypoints: del center_net_config.keypoint_estimation_task[:] def input_shape(self): """Returns shape of TFLite model input.""" return [1, self._height, self._width, self._num_channels] @tf.function def inference_fn(self, image): """Encapsulates CenterNet inference for TFLite conversion. Args: image: a float32 tensor of shape [1, image_height, image_width, channel] denoting the image pixel values. Returns: A dictionary of predicted tensors: classes: a float32 tensor with shape [1, max_detections] denoting class ID for each detection. scores: a float32 tensor with shape [1, max_detections] denoting score for each detection. boxes: a float32 tensor with shape [1, max_detections, 4] denoting coordinates of each detected box. keypoints: a float32 with shape [1, max_detections, num_keypoints, 2] denoting the predicted keypoint coordinates (normalized in between 0-1). Note that [:, :, :, 0] represents the y coordinates and [:, :, :, 1] represents the x coordinates. keypoint_scores: a float32 with shape [1, max_detections, num_keypoints] denoting keypoint confidence scores. """ image = tf.cast(image, tf.float32) image, shapes = self._model.preprocess(image) prediction_dict = self._model.predict(image, None) detections = self._model.postprocess( prediction_dict, true_image_shapes=shapes) field_names = fields.DetectionResultFields classes_field = field_names.detection_classes classes = tf.cast(detections[classes_field], tf.float32) num_detections = tf.cast(detections[field_names.num_detections], tf.float32) if self._include_keypoints: model_outputs = (detections[field_names.detection_boxes], classes, detections[field_names.detection_scores], num_detections, detections[field_names.detection_keypoints], detections[field_names.detection_keypoint_scores]) else: model_outputs = (detections[field_names.detection_boxes], classes, detections[field_names.detection_scores], num_detections) # tf.function@ seems to reverse order of inputs, so reverse them here. return model_outputs[::-1] def export_tflite_model(pipeline_config, trained_checkpoint_dir, output_directory, max_detections, use_regular_nms, include_keypoints=False, label_map_path=''): """Exports inference SavedModel for TFLite conversion. NOTE: Only supports SSD meta-architectures for now, and the output model will have static-shaped, single-batch input. 
This function creates `output_directory` if it does not already exist, which will hold the intermediate SavedModel that can be used with the TFLite converter. Args: pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. trained_checkpoint_dir: Path to the trained checkpoint file. output_directory: Path to write outputs. max_detections: Max detections desired from the TFLite model. use_regular_nms: If True, TFLite model uses the (slower) multi-class NMS. Note that this argument is only used by the SSD model. include_keypoints: Decides whether to also output the keypoint predictions. Note that this argument is only used by the CenterNet model. label_map_path: Path to the label map which is used by CenterNet keypoint estimation task. If provided, the label_map_path in the configuration will be replaced by this one. Raises: ValueError: if pipeline is invalid. """ output_saved_model_directory = os.path.join(output_directory, 'saved_model') # Build the underlying model using pipeline config. # TODO(b/162842801): Add support for other architectures. if pipeline_config.model.WhichOneof('model') == 'ssd': detection_model = model_builder.build( pipeline_config.model, is_training=False) ckpt = tf.train.Checkpoint(model=detection_model) # The module helps build a TF SavedModel appropriate for TFLite conversion. detection_module = SSDModule(pipeline_config, detection_model, max_detections, use_regular_nms) elif pipeline_config.model.WhichOneof('model') == 'center_net': detection_module = CenterNetModule( pipeline_config, max_detections, include_keypoints, label_map_path=label_map_path) ckpt = tf.train.Checkpoint(model=detection_module.get_model()) else: raise ValueError('Only ssd or center_net models are supported in tflite. ' 'Found {} in config'.format( pipeline_config.model.WhichOneof('model'))) manager = tf.train.CheckpointManager( ckpt, trained_checkpoint_dir, max_to_keep=1) status = ckpt.restore(manager.latest_checkpoint).expect_partial() # Getting the concrete function traces the graph and forces variables to # be constructed; only after this can we save the saved model. status.assert_existing_objects_matched() concrete_function = detection_module.inference_fn.get_concrete_function( tf.TensorSpec( shape=detection_module.input_shape(), dtype=tf.float32, name='input')) status.assert_existing_objects_matched() # Export SavedModel. tf.saved_model.save( detection_module, output_saved_model_directory, signatures=concrete_function)
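# Illustrative sketch: the SavedModel written by export_tflite_model above is
# an intermediate artifact; a typical next step is running the TF2 TFLite
# converter over it. The output directory is a placeholder, and this snippet
# reuses the `os` and `tf` (tensorflow.compat.v2) imports at the top of this
# file. allow_custom_ops is needed for SSD exports because the graph contains
# the TFLite_Detection_PostProcess custom op.
converter = tf.lite.TFLiteConverter.from_saved_model(
    os.path.join('/tmp/output', 'saved_model'))
converter.allow_custom_ops = True
tflite_model = converter.convert()
with tf.io.gfile.GFile(os.path.join('/tmp/output', 'model.tflite'), 'wb') as f:
  f.write(tflite_model)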
16,604
43.28
115
py
models
models-master/research/object_detection/core/minibatch_sampler_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for google3.research.vale.object_detection.minibatch_sampler."""

import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.core import minibatch_sampler
from object_detection.utils import test_case


class MinibatchSamplerTest(test_case.TestCase):

  def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
    np_indicator = np.array([True, False, True, False, True, True, False])
    def graph_fn(indicator):
      samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
          indicator, 3)
      return samples
    samples_out = self.execute(graph_fn, [np_indicator])
    self.assertEqual(3, np.sum(samples_out))
    self.assertAllEqual(samples_out,
                        np.logical_and(samples_out, np_indicator))

  def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
    np_indicator = np.array([True, False, True, False, True, True, False])
    def graph_fn(indicator):
      samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
          indicator, 5)
      return samples
    samples_out = self.execute(graph_fn, [np_indicator])
    self.assertEqual(4, np.sum(samples_out))
    self.assertAllEqual(samples_out,
                        np.logical_and(samples_out, np_indicator))

  def test_subsample_indicator_when_num_samples_is_zero(self):
    np_indicator = np.array([True, False, True, False, True, True, False])
    def graph_fn(indicator):
      samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator(
          indicator, 0)
      return samples_none
    samples_out = self.execute(graph_fn, [np_indicator])
    self.assertAllEqual(
        np.zeros_like(samples_out, dtype=bool),
        samples_out)

  def test_subsample_indicator_when_indicator_all_false(self):
    indicator_empty = np.zeros([0], dtype=bool)
    def graph_fn(indicator):
      samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator(
          indicator, 4)
      return samples_empty
    samples_out = self.execute(graph_fn, [indicator_empty])
    self.assertEqual(0, samples_out.size)


if __name__ == '__main__':
  tf.test.main()
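# Illustrative NumPy restatement (not library code) of the contract these
# tests check for MinibatchSampler.subsample_indicator: the result is a
# boolean mask with at most `num_samples` True entries, each at a position
# that was already True in the input indicator. The function name and the
# fixed RNG seed are arbitrary choices for the sketch.
import numpy as np

def subsample_indicator_reference(indicator, num_samples, rng=None):
  rng = rng or np.random.RandomState(0)
  indicator = np.asarray(indicator, dtype=bool)
  # Pick at most num_samples of the True positions uniformly at random.
  chosen = rng.permutation(np.flatnonzero(indicator))[:num_samples]
  result = np.zeros_like(indicator)
  result[chosen] = True
  return result

# subsample_indicator_reference([True, False, True, False, True, True, False], 3)
# -> a mask with exactly three True entries, each at an index that was True.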
2,842
38.486111
80
py
models
models-master/research/object_detection/core/post_processing.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Post-processing operations on detected boxes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import standard_fields as fields from object_detection.utils import shape_utils _NMS_TILE_SIZE = 512 def batch_iou(boxes1, boxes2): """Calculates the overlap between proposal and ground truth boxes. Some `boxes2` may have been padded. The returned `iou` tensor for these boxes will be -1. Args: boxes1: a tensor with a shape of [batch_size, N, 4]. N is the number of proposals before groundtruth assignment. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. boxes2: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with a negative value. Returns: iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES]. """ with tf.name_scope('BatchIOU'): y1_min, x1_min, y1_max, x1_max = tf.split( value=boxes1, num_or_size_splits=4, axis=2) y2_min, x2_min, y2_max, x2_max = tf.split( value=boxes2, num_or_size_splits=4, axis=2) # Calculates the intersection area. intersection_xmin = tf.maximum(x1_min, tf.transpose(x2_min, [0, 2, 1])) intersection_xmax = tf.minimum(x1_max, tf.transpose(x2_max, [0, 2, 1])) intersection_ymin = tf.maximum(y1_min, tf.transpose(y2_min, [0, 2, 1])) intersection_ymax = tf.minimum(y1_max, tf.transpose(y2_max, [0, 2, 1])) intersection_area = tf.maximum( (intersection_xmax - intersection_xmin), 0) * tf.maximum( (intersection_ymax - intersection_ymin), 0) # Calculates the union area. area1 = (y1_max - y1_min) * (x1_max - x1_min) area2 = (y2_max - y2_min) * (x2_max - x2_min) # Adds a small epsilon to avoid divide-by-zero. union_area = area1 + tf.transpose(area2, [0, 2, 1]) - intersection_area + 1e-8 # Calculates IoU. iou = intersection_area / union_area # Fills -1 for padded ground truth boxes. padding_mask = tf.logical_and( tf.less(intersection_xmax, 0), tf.less(intersection_ymax, 0)) iou = tf.where(padding_mask, -tf.ones_like(iou), iou) return iou def _self_suppression(iou, iou_threshold, loop_condition, iou_sum): """Bounding-boxes self-suppression loop body. Args: iou: A float Tensor with shape [1, num_boxes, max_num_instance]: IOUs. iou_threshold: A scalar, representing IOU threshold. loop_condition: The loop condition returned from last iteration. iou_sum: iou_sum_new returned from last iteration. Returns: iou_suppressed: A float Tensor with shape [1, num_boxes, max_num_instance], IOU after suppression. iou_threshold: A scalar, representing IOU threshold. 
loop_condition: Bool Tensor of shape [], the loop condition. iou_sum_new: The new IOU sum. """ del loop_condition can_suppress_others = tf.cast( tf.reshape(tf.reduce_max(iou, 1) <= iou_threshold, [1, -1, 1]), iou.dtype) iou_suppressed = tf.reshape( tf.cast( tf.reduce_max(can_suppress_others * iou, 1) <= iou_threshold, iou.dtype), [1, -1, 1]) * iou iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2]) return [ iou_suppressed, iou_threshold, tf.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new ] def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx): """Bounding-boxes cross-suppression loop body. Args: boxes: A float Tensor of shape [1, anchors, 4], representing boxes. box_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile returned from last iteration iou_threshold: A scalar, representing IOU threshold. inner_idx: A scalar, representing inner index. Returns: boxes: A float Tensor of shape [1, anchors, 4], representing boxes. ret_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile after suppression iou_threshold: A scalar, representing IOU threshold. inner_idx: A scalar, inner index incremented. """ new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0], [1, _NMS_TILE_SIZE, 4]) iou = batch_iou(new_slice, box_slice) ret_slice = tf.expand_dims( tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice return boxes, ret_slice, iou_threshold, inner_idx + 1 def _suppression_loop_body(boxes, iou_threshold, output_size, idx): """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE). Args: boxes: a tensor with a shape of [1, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [1]. Representing the number of selected boxes. idx: an integer scalar representing induction variable. Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable. """ num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE # Iterates over tiles that can possibly suppress the current tile. box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0], [1, _NMS_TILE_SIZE, 4]) _, box_slice, _, _ = tf.while_loop( lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, _cross_suppression, [boxes, box_slice, iou_threshold, tf.constant(0)]) # Iterates over the current tile to compute self-suppression. iou = batch_iou(box_slice, box_slice) mask = tf.expand_dims( tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape( tf.range(_NMS_TILE_SIZE), [-1, 1]), 0) iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) suppressed_iou, _, _, _ = tf.while_loop( lambda _iou, _threshold, loop_condition, _iou_sum: loop_condition, _self_suppression, [iou, iou_threshold, tf.constant(True), tf.reduce_sum(iou, [1, 2])]) suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0 box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) # Uses box_slice to update the input boxes. mask = tf.reshape( tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) boxes = tf.tile(tf.expand_dims(box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape( boxes, [1, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask) boxes = tf.reshape(boxes, [1, -1, 4]) # Updates output_size. 
output_size += tf.reduce_sum( tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1]) return boxes, iou_threshold, output_size, idx + 1 def partitioned_non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf')): """A tiled version of [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression_padded). The overall design of the algorithm is to handle boxes tile-by-tile: boxes = boxes.pad_to_multiple_of(tile_size) num_tiles = len(boxes) // tile_size output_boxes = [] for i in range(num_tiles): box_tile = boxes[i*tile_size : (i+1)*tile_size] for j in range(i - 1): suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] iou = batch_iou(box_tile, suppressing_tile) # if the box is suppressed in iou, clear it to a dot box_tile *= _update_boxes(iou) # Iteratively handle the diagonal tile. iou = _box_overlap(box_tile, box_tile) iou_changed = True while iou_changed: # boxes that are not suppressed by anything else suppressing_boxes = _get_suppressing_boxes(iou) # boxes that are suppressed by suppressing_boxes suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) # clear iou to 0 for boxes that are suppressed, as they cannot be used # to suppress other boxes any more new_iou = _clear_iou(iou, suppressed_boxes) iou_changed = (new_iou != iou) iou = new_iou # remaining boxes that can still suppress others, are selected boxes. output_boxes.append(_get_suppressing_boxes(iou)) if len(output_boxes) >= max_output_size: break Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. Returns: selected_indices: a tensor of shape [anchors]. num_valid_boxes: a scalar int tensor. nms_proposals: a tensor with a shape of [anchors, 4]. It has same dtype as input boxes. nms_scores: a tensor with a shape of [anchors]. It has same dtype as input scores. argsort_ids: a tensor of shape [anchors], mapping from input order of boxes to output order of boxes. """ num_boxes = tf.shape(boxes)[0] pad = tf.cast( tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE), tf.int32) * _NMS_TILE_SIZE - num_boxes scores, argsort_ids = tf.nn.top_k(scores, k=num_boxes, sorted=True) boxes = tf.gather(boxes, argsort_ids) num_boxes = tf.shape(boxes)[0] num_boxes += pad boxes = tf.pad( tf.cast(boxes, tf.float32), [[0, pad], [0, 0]], constant_values=-1) scores = tf.pad(tf.cast(scores, tf.float32), [[0, pad]]) # mask boxes to -1 by score threshold scores_mask = tf.expand_dims( tf.cast(scores > score_threshold, boxes.dtype), axis=1) boxes = ((boxes + 1.) * scores_mask) - 1. 
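  # Boxes scoring at or below score_threshold are folded into the padding
  # convention by the line above: scores_mask is 1 for kept boxes (making the
  # expression a no-op) and 0 for dropped boxes, whose coordinates collapse to
  # -1 -- the same value used for padded boxes, so they are treated as padding
  # by batch_iou and the suppression loop.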
boxes = tf.expand_dims(boxes, axis=0) scores = tf.expand_dims(scores, axis=0) def _loop_cond(unused_boxes, unused_threshold, output_size, idx): return tf.logical_and( tf.reduce_min(output_size) < max_output_size, idx < num_boxes // _NMS_TILE_SIZE) selected_boxes, _, output_size, _ = tf.while_loop( _loop_cond, _suppression_loop_body, [boxes, iou_threshold, tf.zeros([1], tf.int32), tf.constant(0)]) idx = num_boxes - tf.cast( tf.nn.top_k( tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) * tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0], tf.int32) idx = tf.minimum(idx, num_boxes - 1 - pad) idx = tf.reshape(idx + tf.reshape(tf.range(1) * num_boxes, [-1, 1]), [-1]) num_valid_boxes = tf.reduce_sum(output_size) return (idx, num_valid_boxes, tf.reshape(boxes, [-1, 4]), tf.reshape(scores, [-1]), argsort_ids) def _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, change_coordinate_frame, clip_window): """Validates boxes, scores and iou_thresh. This function validates the boxes, scores, iou_thresh and if change_coordinate_frame is True, clip_window must be specified. Args: boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either number of classes or 1 depending on whether a separate box is predicted per class. scores: A [k, num_classes] float32 tensor containing the scores for each of the k detections. The scores have to be non-negative when pad_to_max_output_size is True. iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] representing the window to clip and normalize boxes to before performing non-max suppression. Raises: ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have a valid scores field. """ if not 0 <= iou_thresh <= 1.0: raise ValueError('iou_thresh must be between 0 and 1') if scores.shape.ndims != 2: raise ValueError('scores field must be of rank 2') if shape_utils.get_dim_as_int(scores.shape[1]) is None: raise ValueError('scores must have statically defined second ' 'dimension') if boxes.shape.ndims != 3: raise ValueError('boxes must be of rank 3.') if not (shape_utils.get_dim_as_int( boxes.shape[1]) == shape_utils.get_dim_as_int(scores.shape[1]) or shape_utils.get_dim_as_int(boxes.shape[1]) == 1): raise ValueError('second dimension of boxes must be either 1 or equal ' 'to the second dimension of scores') if shape_utils.get_dim_as_int(boxes.shape[2]) != 4: raise ValueError('last dimension of boxes must be of size 4.') if change_coordinate_frame and clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_window' 'must be specified.') def _clip_window_prune_boxes(sorted_boxes, clip_window, pad_to_max_output_size, change_coordinate_frame): """Prune boxes with zero area. Args: sorted_boxes: A BoxList containing k detections. clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] representing the window to clip and normalize boxes to before performing non-max suppression. pad_to_max_output_size: flag indicating whether to pad to max output size or not. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided). 
Returns: sorted_boxes: A BoxList containing k detections after pruning. num_valid_nms_boxes_cumulative: Number of valid NMS boxes """ sorted_boxes = box_list_ops.clip_to_window( sorted_boxes, clip_window, filter_nonoverlapping=not pad_to_max_output_size) # Set the scores of boxes with zero area to -1 to keep the default # behaviour of pruning out zero area boxes. sorted_boxes_size = tf.shape(sorted_boxes.get())[0] non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool) sorted_boxes_scores = tf.where( non_zero_box_area, sorted_boxes.get_field(fields.BoxListFields.scores), -1 * tf.ones(sorted_boxes_size)) sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores) num_valid_nms_boxes_cumulative = tf.reduce_sum( tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32)) sorted_boxes = box_list_ops.sort_by_field(sorted_boxes, fields.BoxListFields.scores) if change_coordinate_frame: sorted_boxes = box_list_ops.change_coordinate_frame(sorted_boxes, clip_window) if sorted_boxes.has_field(fields.BoxListFields.keypoints): sorted_keypoints = sorted_boxes.get_field(fields.BoxListFields.keypoints) sorted_keypoints = keypoint_ops.change_coordinate_frame(sorted_keypoints, clip_window) sorted_boxes.set_field(fields.BoxListFields.keypoints, sorted_keypoints) return sorted_boxes, num_valid_nms_boxes_cumulative def _clip_boxes(boxes, clip_window): """Clips boxes to the given window. Args: boxes: A [batch, num_boxes, 4] float32 tensor containing box coordinates in [ymin, xmin, ymax, xmax] form. clip_window: A [batch, 4] float32 tensor with left top and right bottom coordinate of the window in [ymin, xmin, ymax, xmax] form. Returns: A [batch, num_boxes, 4] float32 tensor containing boxes clipped to the given window. """ ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=-1) clipped_ymin = tf.maximum(ymin, clip_window[:, 0, tf.newaxis]) clipped_xmin = tf.maximum(xmin, clip_window[:, 1, tf.newaxis]) clipped_ymax = tf.minimum(ymax, clip_window[:, 2, tf.newaxis]) clipped_xmax = tf.minimum(xmax, clip_window[:, 3, tf.newaxis]) return tf.stack([clipped_ymin, clipped_xmin, clipped_ymax, clipped_xmax], axis=-1) class NullContextmanager(object): def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False def multiclass_non_max_suppression(boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size=0, clip_window=None, change_coordinate_frame=False, masks=None, boundaries=None, pad_to_max_output_size=False, use_partitioned_nms=False, additional_fields=None, soft_nms_sigma=0.0, use_hard_nms=False, use_cpu_nms=False, scope=None): """Multi-class version of non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. It operates independently for each class for which scores are provided (via the scores field of the input box_list), pruning boxes with score less than a provided threshold prior to applying NMS. Please note that this operation is performed on *all* classes, therefore any background classes should be removed prior to calling this function. Selected boxes are guaranteed to be sorted in decreasing order by score (but the sort is not guaranteed to be stable). Args: boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either number of classes or 1 depending on whether a separate box is predicted per class. 
scores: A [k, num_classes] float32 tensor containing the scores for each of the k detections. The scores have to be non-negative when pad_to_max_output_size is True. score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). max_size_per_class: maximum number of retained boxes per class. max_total_size: maximum number of boxes retained over all classes. By default returns all boxes retained after capping boxes per class. clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] representing the window to clip and normalize boxes to before performing non-max suppression. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) masks: (optional) a [k, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 tensor containing box boundaries. `q` can be either number of classes or 1 depending on whether a separate boundary is predicted per class. pad_to_max_output_size: If true, the output nmsed boxes are padded to be of length `max_size_per_class`. Defaults to false. use_partitioned_nms: If true, use partitioned version of non_max_suppression. additional_fields: (optional) If not None, a dictionary that maps keys to tensors whose first dimensions are all of size `k`. After non-maximum suppression, all tensors corresponding to the selected boxes will be added to resulting BoxList. soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; See Bodla et al, https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS. Soft NMS is currently only supported when pad_to_max_output_size is False. use_hard_nms: Enforce the usage of hard NMS. use_cpu_nms: Enforce NMS to run on CPU. scope: name scope. Returns: A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a BoxList holds M boxes with a rank-1 scores field representing corresponding scores for each box with scores sorted in decreasing order and a rank-1 classes field representing a class label for each box. The num_valid_nms_boxes is a 0-D integer tensor representing the number of valid elements in `BoxList`, with the valid elements appearing first. Raises: ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have a valid scores field. ValueError: if Soft NMS (tf.image.non_max_suppression_with_scores) is not supported in the current TF version and `soft_nms_sigma` is nonzero. 
""" _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, change_coordinate_frame, clip_window) if pad_to_max_output_size and soft_nms_sigma != 0.0: raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' 'supported when pad_to_max_output_size is True.') with tf.name_scope(scope, 'MultiClassNonMaxSuppression'), tf.device( 'cpu:0') if use_cpu_nms else NullContextmanager(): num_scores = tf.shape(scores)[0] num_classes = shape_utils.get_dim_as_int(scores.get_shape()[1]) selected_boxes_list = [] num_valid_nms_boxes_cumulative = tf.constant(0) per_class_boxes_list = tf.unstack(boxes, axis=1) if masks is not None: per_class_masks_list = tf.unstack(masks, axis=1) if boundaries is not None: per_class_boundaries_list = tf.unstack(boundaries, axis=1) boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1 else [0] * num_classes) for class_idx, boxes_idx in zip(range(num_classes), boxes_ids): per_class_boxes = per_class_boxes_list[boxes_idx] boxlist_and_class_scores = box_list.BoxList(per_class_boxes) class_scores = tf.reshape( tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1]) boxlist_and_class_scores.add_field(fields.BoxListFields.scores, class_scores) if masks is not None: per_class_masks = per_class_masks_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.masks, per_class_masks) if boundaries is not None: per_class_boundaries = per_class_boundaries_list[boxes_idx] boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, per_class_boundaries) if additional_fields is not None: for key, tensor in additional_fields.items(): boxlist_and_class_scores.add_field(key, tensor) nms_result = None selected_scores = None if pad_to_max_output_size: max_selection_size = max_size_per_class if use_partitioned_nms: (selected_indices, num_valid_nms_boxes, boxlist_and_class_scores.data['boxes'], boxlist_and_class_scores.data['scores'], _) = partitioned_non_max_suppression_padded( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh) else: selected_indices, num_valid_nms_boxes = ( tf.image.non_max_suppression_padded( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field( fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh, pad_to_max_output_size=True)) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) selected_scores = nms_result.get_field(fields.BoxListFields.scores) else: max_selection_size = tf.minimum(max_size_per_class, boxlist_and_class_scores.num_boxes()) if (hasattr(tf.image, 'non_max_suppression_with_scores') and tf.compat.forward_compatible(2019, 6, 6) and not use_hard_nms): (selected_indices, selected_scores ) = tf.image.non_max_suppression_with_scores( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh, soft_nms_sigma=soft_nms_sigma) num_valid_nms_boxes = tf.shape(selected_indices)[0] selected_indices = tf.concat( [selected_indices, tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) selected_scores = tf.concat( [selected_scores, tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) else: if soft_nms_sigma != 0: raise ValueError('Soft NMS not supported in current TF version!') selected_indices = 
tf.image.non_max_suppression( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh) num_valid_nms_boxes = tf.shape(selected_indices)[0] selected_indices = tf.concat( [selected_indices, tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) selected_scores = nms_result.get_field(fields.BoxListFields.scores) # Make the scores -1 for invalid boxes. valid_nms_boxes_indices = tf.less( tf.range(max_selection_size), num_valid_nms_boxes) nms_result.add_field( fields.BoxListFields.scores, tf.where(valid_nms_boxes_indices, selected_scores, -1*tf.ones(max_selection_size))) num_valid_nms_boxes_cumulative += num_valid_nms_boxes nms_result.add_field( fields.BoxListFields.classes, (tf.zeros_like( nms_result.get_field(fields.BoxListFields.scores)) + class_idx)) selected_boxes_list.append(nms_result) selected_boxes = box_list_ops.concatenate(selected_boxes_list) sorted_boxes = box_list_ops.sort_by_field(selected_boxes, fields.BoxListFields.scores) if clip_window is not None: # When pad_to_max_output_size is False, it prunes the boxes with zero # area. sorted_boxes, num_valid_nms_boxes_cumulative = _clip_window_prune_boxes( sorted_boxes, clip_window, pad_to_max_output_size, change_coordinate_frame) if max_total_size: max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) num_valid_nms_boxes_cumulative = tf.where( max_total_size > num_valid_nms_boxes_cumulative, num_valid_nms_boxes_cumulative, max_total_size) # Select only the valid boxes if pad_to_max_output_size is False. if not pad_to_max_output_size: sorted_boxes = box_list_ops.gather( sorted_boxes, tf.range(num_valid_nms_boxes_cumulative)) return sorted_boxes, num_valid_nms_boxes_cumulative def class_agnostic_non_max_suppression(boxes, scores, score_thresh, iou_thresh, max_classes_per_detection=1, max_total_size=0, clip_window=None, change_coordinate_frame=False, masks=None, boundaries=None, pad_to_max_output_size=False, use_partitioned_nms=False, additional_fields=None, soft_nms_sigma=0.0, scope=None): """Class-agnostic version of non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. It operates on all the boxes using max scores across all classes for which scores are provided (via the scores field of the input box_list), pruning boxes with score less than a provided threshold prior to applying NMS. Please note that this operation is performed in a class-agnostic way, therefore any background classes should be removed prior to calling this function. Selected boxes are guaranteed to be sorted in decreasing order by score (but the sort is not guaranteed to be stable). Args: boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either number of classes or 1 depending on whether a separate box is predicted per class. scores: A [k, num_classes] float32 tensor containing the scores for each of the k detections. The scores have to be non-negative when pad_to_max_output_size is True. score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). 
max_classes_per_detection: maximum number of retained classes per detection box in class-agnostic NMS. max_total_size: maximum number of boxes retained over all classes. By default returns all boxes retained after capping boxes per class. clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max] representing the window to clip and normalize boxes to before performing non-max suppression. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) masks: (optional) a [k, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. boundaries: (optional) a [k, q, boundary_height, boundary_width] float32 tensor containing box boundaries. `q` can be either number of classes or 1 depending on whether a separate boundary is predicted per class. pad_to_max_output_size: If true, the output nmsed boxes are padded to be of length `max_size_per_class`. Defaults to false. use_partitioned_nms: If true, use partitioned version of non_max_suppression. additional_fields: (optional) If not None, a dictionary that maps keys to tensors whose first dimensions are all of size `k`. After non-maximum suppression, all tensors corresponding to the selected boxes will be added to resulting BoxList. soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; See Bodla et al, https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS. Soft NMS is currently only supported when pad_to_max_output_size is False. scope: name scope. Returns: A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a BoxList holds M boxes with a rank-1 scores field representing corresponding scores for each box with scores sorted in decreasing order and a rank-1 classes field representing a class label for each box. The num_valid_nms_boxes is a 0-D integer tensor representing the number of valid elements in `BoxList`, with the valid elements appearing first. Raises: ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have a valid scores field or if non-zero soft_nms_sigma is provided when pad_to_max_output_size is True. 
""" _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh, change_coordinate_frame, clip_window) if pad_to_max_output_size and soft_nms_sigma != 0.0: raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not ' 'supported when pad_to_max_output_size is True.') if max_classes_per_detection > 1: raise ValueError('Max classes per detection box >1 not supported.') q = shape_utils.get_dim_as_int(boxes.shape[1]) if q > 1: class_ids = tf.expand_dims( tf.argmax(scores, axis=1, output_type=tf.int32), axis=1) boxes = tf.batch_gather(boxes, class_ids) if masks is not None: masks = tf.batch_gather(masks, class_ids) if boundaries is not None: boundaries = tf.batch_gather(boundaries, class_ids) boxes = tf.squeeze(boxes, axis=[1]) if masks is not None: masks = tf.squeeze(masks, axis=[1]) if boundaries is not None: boundaries = tf.squeeze(boundaries, axis=[1]) with tf.name_scope(scope, 'ClassAgnosticNonMaxSuppression'): boxlist_and_class_scores = box_list.BoxList(boxes) max_scores = tf.reduce_max(scores, axis=-1) classes_with_max_scores = tf.argmax(scores, axis=-1) boxlist_and_class_scores.add_field(fields.BoxListFields.scores, max_scores) if masks is not None: boxlist_and_class_scores.add_field(fields.BoxListFields.masks, masks) if boundaries is not None: boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries, boundaries) if additional_fields is not None: for key, tensor in additional_fields.items(): boxlist_and_class_scores.add_field(key, tensor) nms_result = None selected_scores = None if pad_to_max_output_size: max_selection_size = max_total_size if use_partitioned_nms: (selected_indices, num_valid_nms_boxes, boxlist_and_class_scores.data['boxes'], boxlist_and_class_scores.data['scores'], argsort_ids) = partitioned_non_max_suppression_padded( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh) classes_with_max_scores = tf.gather(classes_with_max_scores, argsort_ids) else: selected_indices, num_valid_nms_boxes = ( tf.image.non_max_suppression_padded( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh, pad_to_max_output_size=True)) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) selected_scores = nms_result.get_field(fields.BoxListFields.scores) else: max_selection_size = tf.minimum(max_total_size, boxlist_and_class_scores.num_boxes()) if (hasattr(tf.image, 'non_max_suppression_with_scores') and tf.compat.forward_compatible(2019, 6, 6)): (selected_indices, selected_scores ) = tf.image.non_max_suppression_with_scores( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh, soft_nms_sigma=soft_nms_sigma) num_valid_nms_boxes = tf.shape(selected_indices)[0] selected_indices = tf.concat([ selected_indices, tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32) ], 0) selected_scores = tf.concat( [selected_scores, tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) else: if soft_nms_sigma != 0: raise ValueError('Soft NMS not supported in current TF version!') selected_indices = tf.image.non_max_suppression( boxlist_and_class_scores.get(), boxlist_and_class_scores.get_field(fields.BoxListFields.scores), 
max_selection_size, iou_threshold=iou_thresh, score_threshold=score_thresh) num_valid_nms_boxes = tf.shape(selected_indices)[0] selected_indices = tf.concat( [selected_indices, tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0) nms_result = box_list_ops.gather(boxlist_and_class_scores, selected_indices) selected_scores = nms_result.get_field(fields.BoxListFields.scores) valid_nms_boxes_indices = tf.less( tf.range(max_selection_size), num_valid_nms_boxes) nms_result.add_field( fields.BoxListFields.scores, tf.where(valid_nms_boxes_indices, selected_scores, -1*tf.ones(max_selection_size))) selected_classes = tf.gather(classes_with_max_scores, selected_indices) selected_classes = tf.cast(selected_classes, tf.float32) nms_result.add_field(fields.BoxListFields.classes, selected_classes) selected_boxes = nms_result sorted_boxes = box_list_ops.sort_by_field(selected_boxes, fields.BoxListFields.scores) if clip_window is not None: # When pad_to_max_output_size is False, it prunes the boxes with zero # area. sorted_boxes, num_valid_nms_boxes = _clip_window_prune_boxes( sorted_boxes, clip_window, pad_to_max_output_size, change_coordinate_frame) if max_total_size: max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes()) sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size)) num_valid_nms_boxes = tf.where(max_total_size > num_valid_nms_boxes, num_valid_nms_boxes, max_total_size) # Select only the valid boxes if pad_to_max_output_size is False. if not pad_to_max_output_size: sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(num_valid_nms_boxes)) return sorted_boxes, num_valid_nms_boxes def batch_multiclass_non_max_suppression(boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size=0, clip_window=None, change_coordinate_frame=False, num_valid_boxes=None, masks=None, additional_fields=None, soft_nms_sigma=0.0, scope=None, use_static_shapes=False, use_partitioned_nms=False, parallel_iterations=32, use_class_agnostic_nms=False, max_classes_per_detection=1, use_dynamic_map_fn=False, use_combined_nms=False, use_hard_nms=False, use_cpu_nms=False): """Multi-class version of non maximum suppression that operates on a batch. This op is similar to `multiclass_non_max_suppression` but operates on a batch of boxes and scores. See documentation for `multiclass_non_max_suppression` for details. Args: boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A [batch_size, num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. The scores have to be non-negative when use_static_shapes is set True. score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap with previously selected boxes are removed). max_size_per_class: maximum number of retained boxes per class. max_total_size: maximum number of boxes retained over all classes. By default returns all boxes retained after capping boxes per class. clip_window: A float32 tensor of shape [batch_size, 4] where each entry is of the form [y_min, x_min, y_max, x_max] representing the window to clip boxes to before performing non-max suppression. This argument can also be a tensor of shape [4] in which case, the same clip window is applied to all images in the batch. 
If clip_widow is None, all boxes are used to perform non-max suppression. change_coordinate_frame: Whether to normalize coordinates after clipping relative to clip_window (this can only be set to True if a clip_window is provided) num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. additional_fields: (optional) If not None, a dictionary that maps keys to tensors whose dimensions are [batch_size, num_anchors, ...]. soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; See Bodla et al, https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS. Soft NMS is currently only supported when pad_to_max_output_size is False. scope: tf scope name. use_static_shapes: If true, the output nmsed boxes are padded to be of length `max_size_per_class` and it doesn't clip boxes to max_total_size. Defaults to false. use_partitioned_nms: If true, use partitioned version of non_max_suppression. parallel_iterations: (optional) number of batch items to process in parallel. use_class_agnostic_nms: If true, this uses class-agnostic non max suppression max_classes_per_detection: Maximum number of retained classes per detection box in class-agnostic NMS. use_dynamic_map_fn: If true, images in the batch will be processed within a dynamic loop. Otherwise, a static loop will be used if possible. use_combined_nms: If true, it uses tf.image.combined_non_max_suppression ( multi-class version of NMS that operates on a batch). It greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. It operates independently for each batch. Within each batch, it operates independently for each class for which scores are provided (via the scores field of the input box_list), pruning boxes with score less than a provided threshold prior to applying NMS. This operation is performed on *all* batches and *all* classes in the batch, therefore any background classes should be removed prior to calling this function. Masks and additional fields are not supported. See argument checks in the code below for unsupported arguments. use_hard_nms: Enforce the usage of hard NMS. use_cpu_nms: Enforce NMS to run on CPU. Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [batch_size, max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) a dictionary of [batch_size, max_detections, ...] float32 tensors corresponding to the tensors specified in the input `additional_fields`. This is not returned if input `additional_fields` is None. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. 
Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. Raises: ValueError: if `q` in boxes.shape is not 1 or not equal to number of classes as inferred from scores.shape. """ if use_combined_nms: if change_coordinate_frame: raise ValueError( 'change_coordinate_frame (normalizing coordinates' ' relative to clip_window) is not supported by combined_nms.') if num_valid_boxes is not None: raise ValueError('num_valid_boxes is not supported by combined_nms.') if masks is not None: raise ValueError('masks is not supported by combined_nms.') if soft_nms_sigma != 0.0: raise ValueError('Soft NMS is not supported by combined_nms.') if use_class_agnostic_nms: raise ValueError('class-agnostic NMS is not supported by combined_nms.') if clip_window is None: tf.logging.warning( 'A default clip window of [0. 0. 1. 1.] will be applied for the ' 'boxes.') if additional_fields is not None: tf.logging.warning('additional_fields is not supported by combined_nms.') if parallel_iterations != 32: tf.logging.warning('Number of batch items to be processed in parallel is' ' not configurable by combined_nms.') if max_classes_per_detection > 1: tf.logging.warning( 'max_classes_per_detection is not configurable by combined_nms.') with tf.name_scope(scope, 'CombinedNonMaxSuppression'): (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, batch_num_detections) = tf.image.combined_non_max_suppression( boxes=boxes, scores=scores, max_output_size_per_class=max_size_per_class, max_total_size=max_total_size, iou_threshold=iou_thresh, score_threshold=score_thresh, clip_boxes=(True if clip_window is None else False), pad_per_class=use_static_shapes) if clip_window is not None: if clip_window.shape.ndims == 1: boxes_shape = boxes.shape batch_size = shape_utils.get_dim_as_int(boxes_shape[0]) clip_window = tf.tile(clip_window[tf.newaxis, :], [batch_size, 1]) batch_nmsed_boxes = _clip_boxes(batch_nmsed_boxes, clip_window) # Not supported by combined_non_max_suppression. batch_nmsed_masks = None # Not supported by combined_non_max_suppression. batch_nmsed_additional_fields = None return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, batch_nmsed_masks, batch_nmsed_additional_fields, batch_num_detections) q = shape_utils.get_dim_as_int(boxes.shape[2]) num_classes = shape_utils.get_dim_as_int(scores.shape[2]) if q != 1 and q != num_classes: raise ValueError('third dimension of boxes must be either 1 or equal ' 'to the third dimension of scores.') if change_coordinate_frame and clip_window is None: raise ValueError('if change_coordinate_frame is True, then a clip_window' 'must be specified.') original_masks = masks # Create ordered dictionary using the sorted keys from # additional fields to ensure getting the same key value assignment # in _single_image_nms_fn(). The dictionary is thus a sorted version of # additional_fields. if additional_fields is None: ordered_additional_fields = collections.OrderedDict() else: ordered_additional_fields = collections.OrderedDict( sorted(additional_fields.items(), key=lambda item: item[0])) with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'): boxes_shape = boxes.shape batch_size = shape_utils.get_dim_as_int(boxes_shape[0]) num_anchors = shape_utils.get_dim_as_int(boxes_shape[1]) if batch_size is None: batch_size = tf.shape(boxes)[0] if num_anchors is None: num_anchors = tf.shape(boxes)[1] # If num valid boxes aren't provided, create one and mark all boxes as # valid. 
if num_valid_boxes is None: num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors # If masks aren't provided, create dummy masks so we can only have one copy # of _single_image_nms_fn and discard the dummy masks after map_fn. if masks is None: masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1]) masks = tf.zeros(masks_shape) if clip_window is None: clip_window = tf.stack([ tf.reduce_min(boxes[:, :, :, 0]), tf.reduce_min(boxes[:, :, :, 1]), tf.reduce_max(boxes[:, :, :, 2]), tf.reduce_max(boxes[:, :, :, 3]) ]) if clip_window.shape.ndims == 1: clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1]) def _single_image_nms_fn(args): """Runs NMS on a single image and returns padded output. Args: args: A list of tensors consisting of the following: per_image_boxes - A [num_anchors, q, 4] float32 tensor containing detections. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. per_image_scores - A [num_anchors, num_classes] float32 tensor containing the scores for each of the `num_anchors` detections. per_image_masks - A [num_anchors, q, mask_height, mask_width] float32 tensor containing box masks. `q` can be either number of classes or 1 depending on whether a separate mask is predicted per class. per_image_clip_window - A 1D float32 tensor of the form [ymin, xmin, ymax, xmax] representing the window to clip the boxes to. per_image_additional_fields - (optional) A variable number of float32 tensors each with size [num_anchors, ...]. per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of shape [batch_size] representing the number of valid boxes to be considered for each image in the batch. This parameter allows for ignoring zero paddings. Returns: 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [max_detections] float32 tensor containing the class for boxes. 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width] float32 tensor containing masks for each selected box. This is set to None if input `masks` is None. 'nmsed_additional_fields': (optional) A variable number of float32 tensors each with size [max_detections, ...] corresponding to the input `per_image_additional_fields`. 'num_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" per_image_boxes = args[0] per_image_scores = args[1] per_image_masks = args[2] per_image_clip_window = args[3] # Make sure that the order of elements passed in args is aligned with # the iteration order of ordered_additional_fields per_image_additional_fields = { key: value for key, value in zip(ordered_additional_fields, args[4:-1]) } per_image_num_valid_boxes = args[-1] if use_static_shapes: total_proposals = tf.shape(per_image_scores) per_image_scores = tf.where( tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes), per_image_scores, tf.fill(total_proposals, np.finfo('float32').min)) else: per_image_boxes = tf.reshape( tf.slice(per_image_boxes, 3 * [0], tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4]) per_image_scores = tf.reshape( tf.slice(per_image_scores, [0, 0], tf.stack([per_image_num_valid_boxes, -1])), [-1, num_classes]) per_image_masks = tf.reshape( tf.slice(per_image_masks, 4 * [0], tf.stack([per_image_num_valid_boxes, -1, -1, -1])), [-1, q, shape_utils.get_dim_as_int(per_image_masks.shape[2]), shape_utils.get_dim_as_int(per_image_masks.shape[3])]) if per_image_additional_fields is not None: for key, tensor in per_image_additional_fields.items(): additional_field_shape = tensor.get_shape() additional_field_dim = len(additional_field_shape) per_image_additional_fields[key] = tf.reshape( tf.slice( per_image_additional_fields[key], additional_field_dim * [0], tf.stack([per_image_num_valid_boxes] + (additional_field_dim - 1) * [-1])), [-1] + [ shape_utils.get_dim_as_int(dim) for dim in additional_field_shape[1:] ]) if use_class_agnostic_nms: nmsed_boxlist, num_valid_nms_boxes = class_agnostic_non_max_suppression( per_image_boxes, per_image_scores, score_thresh, iou_thresh, max_classes_per_detection, max_total_size, clip_window=per_image_clip_window, change_coordinate_frame=change_coordinate_frame, masks=per_image_masks, pad_to_max_output_size=use_static_shapes, use_partitioned_nms=use_partitioned_nms, additional_fields=per_image_additional_fields, soft_nms_sigma=soft_nms_sigma) else: nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression( per_image_boxes, per_image_scores, score_thresh, iou_thresh, max_size_per_class, max_total_size, clip_window=per_image_clip_window, change_coordinate_frame=change_coordinate_frame, masks=per_image_masks, pad_to_max_output_size=use_static_shapes, use_partitioned_nms=use_partitioned_nms, additional_fields=per_image_additional_fields, soft_nms_sigma=soft_nms_sigma, use_hard_nms=use_hard_nms, use_cpu_nms=use_cpu_nms) if not use_static_shapes: nmsed_boxlist = box_list_ops.pad_or_clip_box_list( nmsed_boxlist, max_total_size) num_detections = num_valid_nms_boxes nmsed_boxes = nmsed_boxlist.get() nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores) nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes) nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks) nmsed_additional_fields = [] # Sorting is needed here to ensure that the values stored in # nmsed_additional_fields are always kept in the same order # across different execution runs. 
for key in sorted(per_image_additional_fields.keys()): nmsed_additional_fields.append(nmsed_boxlist.get_field(key)) return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] + nmsed_additional_fields + [num_detections]) num_additional_fields = 0 if ordered_additional_fields: num_additional_fields = len(ordered_additional_fields) num_nmsed_outputs = 4 + num_additional_fields if use_dynamic_map_fn: map_fn = tf.map_fn else: map_fn = shape_utils.static_or_dynamic_map_fn batch_outputs = map_fn( _single_image_nms_fn, elems=([boxes, scores, masks, clip_window] + list(ordered_additional_fields.values()) + [num_valid_boxes]), dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]), parallel_iterations=parallel_iterations) batch_nmsed_boxes = batch_outputs[0] batch_nmsed_scores = batch_outputs[1] batch_nmsed_classes = batch_outputs[2] batch_nmsed_masks = batch_outputs[3] batch_nmsed_values = batch_outputs[4:-1] batch_nmsed_additional_fields = {} if num_additional_fields > 0: # Sort the keys to ensure arranging elements in same order as # in _single_image_nms_fn. batch_nmsed_keys = list(ordered_additional_fields.keys()) for i in range(len(batch_nmsed_keys)): batch_nmsed_additional_fields[ batch_nmsed_keys[i]] = batch_nmsed_values[i] batch_num_detections = batch_outputs[-1] if original_masks is None: batch_nmsed_masks = None if not ordered_additional_fields: batch_nmsed_additional_fields = None return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes, batch_nmsed_masks, batch_nmsed_additional_fields, batch_num_detections)
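# Illustrative usage sketch, not part of the file above: a minimal call to
# batch_multiclass_non_max_suppression on a toy batch. It assumes this code
# lives in object_detection/core/post_processing.py (as in the upstream
# TensorFlow models repository) and runs under tf.compat.v1 graph mode.
# Shapes follow the docstring: boxes are [batch_size, num_anchors, q, 4]
# with q == 1 (one shared box per class), scores are
# [batch_size, num_anchors, num_classes].
import tensorflow.compat.v1 as tf

from object_detection.core import post_processing  # assumed module path

# One image, three anchors, two classes; the first two anchors overlap
# heavily, so hard NMS with iou_thresh=0.5 keeps at most one of them per
# class.
boxes = tf.constant(
    [[[[0.0, 0.0, 1.0, 1.0]],
      [[0.0, 0.1, 1.0, 1.1]],
      [[0.0, 0.6, 0.5, 0.9]]]], tf.float32)   # [1, 3, 1, 4]
scores = tf.constant(
    [[[0.9, 0.1],
      [0.75, 0.2],
      [0.3, 0.6]]], tf.float32)               # [1, 3, 2]

(nmsed_boxes, nmsed_scores, nmsed_classes, _, _,
 num_detections) = post_processing.batch_multiclass_non_max_suppression(
     boxes,
     scores,
     score_thresh=0.1,
     iou_thresh=0.5,
     max_size_per_class=2,
     max_total_size=4)

with tf.Session() as sess:
  print(sess.run([nmsed_boxes, nmsed_scores, nmsed_classes, num_detections]))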
60675
46.514487
144
py
models
models-master/research/object_detection/core/balanced_positive_negative_sampler_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.balanced_positive_negative_sampler.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import balanced_positive_negative_sampler from object_detection.utils import test_case class BalancedPositiveNegativeSamplerTest(test_case.TestCase): def test_subsample_all_examples(self): if self.has_tpu(): return numpy_labels = np.random.permutation(300) indicator = np.array(np.ones(300) == 1, bool) numpy_labels = (numpy_labels - 200) > 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 64) self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) self.assertEqual(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)), 32) def test_subsample_all_examples_static(self): if not self.has_tpu(): return numpy_labels = np.random.permutation(300) indicator = np.array(np.ones(300) == 1, bool) numpy_labels = (numpy_labels - 200) > 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 64) self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 32) self.assertEqual(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)), 32) def test_subsample_selection(self): if self.has_tpu(): return # Test random sampling when only some examples can be sampled: # 100 samples, 20 positives, 10 positives cannot be sampled. numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 90 indicator = np.array(numpy_indicator, bool) numpy_labels = (numpy_labels - 80) >= 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 64) self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) self.assertEqual(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)), 54) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_static(self): if not self.has_tpu(): return # Test random sampling when only some examples can be sampled: # 100 samples, 20 positives, 10 positives cannot be sampled. 
numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 90 indicator = np.array(numpy_indicator, bool) numpy_labels = (numpy_labels - 80) >= 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 64) self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) self.assertEqual(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)), 54) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_larger_batch_size(self): if self.has_tpu(): return # Test random sampling when total number of examples that can be sampled are # less than batch size: # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. # It should still return 64 samples, with 4 of them that couldn't have been # sampled. numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 60 indicator = np.array(numpy_indicator, bool) numpy_labels = (numpy_labels - 50) >= 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_cpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 60) self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) self.assertGreaterEqual( sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) def test_subsample_selection_larger_batch_size_static(self): if not self.has_tpu(): return # Test random sampling when total number of examples that can be sampled are # less than batch size: # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. # It should still return 64 samples, with 4 of them that couldn't have been # sampled. numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 60 indicator = np.array(numpy_indicator, bool) numpy_labels = (numpy_labels - 50) >= 0 labels = np.array(numpy_labels, bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute_tpu(graph_fn, [indicator, labels]) self.assertEqual(sum(is_sampled), 64) self.assertGreaterEqual(sum(np.logical_and(numpy_labels, is_sampled)), 10) self.assertGreaterEqual( sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)), 50) self.assertEqual(sum(np.logical_and(is_sampled, numpy_indicator)), 60) def test_subsample_selection_no_batch_size(self): if self.has_tpu(): return # Test random sampling when only some examples can be sampled: # 1000 samples, 6 positives (5 can be sampled). numpy_labels = np.arange(1000) numpy_indicator = numpy_labels < 999 numpy_labels = (numpy_labels - 994) >= 0 def graph_fn(indicator, labels): sampler = (balanced_positive_negative_sampler. 
BalancedPositiveNegativeSampler(0.01)) is_sampled = sampler.subsample(indicator, None, labels) return is_sampled is_sampled_out = self.execute_cpu(graph_fn, [numpy_indicator, numpy_labels]) self.assertEqual(sum(is_sampled_out), 500) self.assertEqual(sum(np.logical_and(numpy_labels, is_sampled_out)), 5) self.assertEqual(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled_out)), 495) self.assertAllEqual(is_sampled_out, np.logical_and(is_sampled_out, numpy_indicator)) def test_subsample_selection_no_batch_size_static(self): labels = tf.constant([[True, False, False]]) indicator = tf.constant([True, False, True]) sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, None, labels) def test_raises_error_with_incorrect_label_shape(self): labels = tf.constant([[True, False, False]]) indicator = tf.constant([True, False, True]) sampler = (balanced_positive_negative_sampler. BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, 64, labels) def test_raises_error_with_incorrect_indicator_shape(self): labels = tf.constant([True, False, False]) indicator = tf.constant([[True, False, True]]) sampler = (balanced_positive_negative_sampler. BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, 64, labels) if __name__ == '__main__': tf.test.main()
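# Illustrative sketch, not part of the test file above: the API these tests
# exercise, using the same import path as the tests and the sampler's default
# 50/50 positive/negative balance. A boolean `indicator` marks which of the N
# entries may be sampled at all, a boolean `labels` marks the positives, and
# `subsample` returns a boolean mask selecting `batch_size` entries.
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.core import balanced_positive_negative_sampler

indicator = tf.constant(np.ones(100, dtype=bool))  # every entry is eligible
labels = tf.constant(np.arange(100) < 20)          # first 20 entries are positives
sampler = (
    balanced_positive_negative_sampler.BalancedPositiveNegativeSampler())
is_sampled = sampler.subsample(indicator, 32, labels)

with tf.Session() as sess:
  sampled = sess.run(is_sampled)
print(sampled.sum())  # 32 entries sampled: 16 positives and 16 negatives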
8835
40.483568
80
py
models
models-master/research/object_detection/core/losses_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for google3.research.vale.object_detection.losses.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import losses from object_detection.core import matcher from object_detection.utils import test_case class WeightedL2LocalizationLossTest(test_case.TestCase): def testReturnsCorrectWeightedLoss(self): batch_size = 3 num_anchors = 10 code_size = 4 def graph_fn(): prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32) loss_op = losses.WeightedL2LocalizationLoss() loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor, weights=weights)) return loss expected_loss = (3 * 5 * 4) / 2.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectAnchorwiseLoss(self): batch_size = 3 num_anchors = 16 code_size = 4 def graph_fn(): prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.ones([batch_size, num_anchors]) loss_op = losses.WeightedL2LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) return loss expected_loss = np.ones((batch_size, num_anchors)) * 2 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectNanLoss(self): batch_size = 3 num_anchors = 10 code_size = 4 def graph_fn(): prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.concat([ tf.zeros([batch_size, num_anchors, code_size / 2]), tf.ones([batch_size, num_anchors, code_size / 2]) * np.nan ], axis=2) weights = tf.ones([batch_size, num_anchors]) loss_op = losses.WeightedL2LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, ignore_nan_targets=True) loss = tf.reduce_sum(loss) return loss expected_loss = (3 * 5 * 4) / 2.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectWeightedLossWithLossesMask(self): batch_size = 4 num_anchors = 10 code_size = 4 def graph_fn(): prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32) losses_mask = tf.constant([True, False, True, True], tf.bool) loss_op = losses.WeightedL2LocalizationLoss() loss = 
tf.reduce_sum(loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask)) return loss expected_loss = (3 * 5 * 4) / 2.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, expected_loss) class WeightedSmoothL1LocalizationLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): batch_size = 2 num_anchors = 3 code_size = 4 def graph_fn(): prediction_tensor = tf.constant([[[2.5, 0, .4, 0], [0, 0, 0, 0], [0, 2.5, 0, .4]], [[3.5, 0, 0, 0], [0, .4, 0, .9], [0, 0, 1.5, 0]]], tf.float32) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[2, 1, 1], [0, 3, 0]], tf.float32) loss_op = losses.WeightedSmoothL1LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss exp_loss = 7.695 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): batch_size = 3 num_anchors = 3 code_size = 4 def graph_fn(): prediction_tensor = tf.constant([[[2.5, 0, .4, 0], [0, 0, 0, 0], [0, 2.5, 0, .4]], [[3.5, 0, 0, 0], [0, .4, 0, .9], [0, 0, 1.5, 0]], [[3.5, 7., 0, 0], [0, .4, 0, .9], [2.2, 2.2, 1.5, 0]]], tf.float32) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[2, 1, 1], [0, 3, 0], [4, 3, 0]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSmoothL1LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) return loss exp_loss = 7.695 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) class WeightedIOULocalizationLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, .5, .25]]]) target_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]]) weights = [[1.0, .5, 2.0]] loss_op = losses.WeightedIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss exp_loss = 2.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithNoLabels(self): def graph_fn(): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, .5, .25]]]) target_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]]) weights = [[1.0, .5, 2.0]] losses_mask = tf.constant([False], tf.bool) loss_op = losses.WeightedIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) return loss exp_loss = 0.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) class WeightedGIOULocalizationLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, 0, 0]]]) target_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [5, 5, 10, 10]]]) weights = [[1.0, .5, 2.0]] loss_op = losses.WeightedGIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss exp_loss = 3.5 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithNoLabels(self): def graph_fn(): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, .5, .25]]]) target_tensor 
= tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]]) weights = [[1.0, .5, 2.0]] losses_mask = tf.constant([False], tf.bool) loss_op = losses.WeightedGIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) return loss exp_loss = 0.0 loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) class WeightedSigmoidClassificationLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss exp_loss = -2 * math.log(.5) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss, axis=2) return loss exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithClassIndices(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100, 100], [100, -100, -100, -100], [100, 0, -100, 100], [-100, -100, 100, -100]], [[-100, 0, 100, 100], [-100, 100, -100, 100], [100, 100, 100, 100], [0, 0, -1, 100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0, 0], [1, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]], [[0, 0, 1, 0], [0, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]]], tf.float32) # Ignores the last class. 
class_indices = tf.constant([0, 1, 2], tf.int32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, class_indices=class_indices) loss = tf.reduce_sum(loss, axis=2) return loss exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSigmoidClassificationLoss() loss_per_anchor = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss_per_anchor) return loss exp_loss = -2 * math.log(.5) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def _logit(probability): return math.log(probability / (1. - probability)) class SigmoidFocalClassificationLossTest(test_case.TestCase): def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self): def graph_fn(): prediction_tensor = tf.constant([[[_logit(0.97)], [_logit(0.91)], [_logit(0.73)], [_logit(0.27)], [_logit(0.09)], [_logit(0.03)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]]) def testHardExamplesProduceLossComparableToSigmoidXEntropy(self): def graph_fn(): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]]) def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self): def graph_fn(): prediction_tensor = 
tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAlmostEqual(order_of_ratio, 1.) def testIgnoreNegativeExampleLossViaAlphaMultiplier(self): def graph_fn(): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=1.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) self.assertAllClose(focal_loss[0][3:], [0., 0.]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss[0][:3] / focal_loss[0][:3]))) self.assertAllClose(order_of_ratio, [1., 1., 1.]) def testIgnorePositiveExampleLossViaAlphaMultiplier(self): def graph_fn(): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) self.assertAllClose(focal_loss[0][:3], [0., 0., 0.]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss[0][3:] / focal_loss[0][3:]))) self.assertAllClose(order_of_ratio, [1., 1.]) def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5, gamma=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights) sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights) return sigmoid_loss, focal_loss 
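# With gamma=0.0 the focal modulating factor is identically 1, and alpha=0.5
# weights the positive and negative terms by one half each, so the focal loss
# should be exactly half of the plain sigmoid cross-entropy; hence the factor
# of 2 in the assertion below.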
sigmoid_loss, focal_loss = self.execute(graph_fn, []) self.assertAllClose(sigmoid_loss, focal_loss * 2) def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None, gamma=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights) sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights) return sigmoid_loss, focal_loss sigmoid_loss, focal_loss = self.execute(graph_fn, []) self.assertAllClose(sigmoid_loss, focal_loss) def testExpectedLossWithAlphaOneAndZeroGamma(self): def graph_fn(): # All zeros correspond to 0.5 probability. prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) return focal_loss focal_loss = self.execute(graph_fn, []) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor 1.0 * # alpha 8), # positives from 8 anchors focal_loss) def testExpectedLossWithAlpha75AndZeroGamma(self): def graph_fn(): # All zeros correspond to 0.5 probability. prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) return focal_loss focal_loss = self.execute(graph_fn, []) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor. ((0.75 * # alpha for positives. 8) + # positives from 8 anchors. (0.25 * # alpha for negatives. 8 * 2))), # negatives from 8 anchors for two classes. focal_loss) def testExpectedLossWithLossesMask(self): def graph_fn(): # All zeros correspond to 0.5 probability. 
prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask)) return focal_loss focal_loss = self.execute(graph_fn, []) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor. ((0.75 * # alpha for positives. 8) + # positives from 8 anchors. (0.25 * # alpha for negatives. 8 * 2))), # negatives from 8 anchors for two classes. focal_loss) class WeightedSoftmaxClassificationLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss loss_output = self.execute(graph_fn, []) exp_loss = - 1.5 * math.log(.5) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) return loss loss_output = self.execute(graph_fn, []) exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self): """At very high logit_scale, all predictions will be ~0.33.""" def graph_fn(): logit_scale = 10e16 prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss( logit_scale=logit_scale) loss = loss_op(prediction_tensor, target_tensor, weights=weights) return loss uniform_distribution_loss = - 
math.log(.33333333333) exp_loss = np.matrix([[uniform_distribution_loss] * 4, [uniform_distribution_loss] * 4]) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) return loss loss_output = self.execute(graph_fn, []) exp_loss = - 1.5 * math.log(.5) self.assertAllClose(loss_output, exp_loss) class WeightedSoftmaxClassificationAgainstLogitsLossTest(test_case.TestCase): def testReturnsCorrectLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 1]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss loss_output = self.execute(graph_fn, []) exp_loss = - 1.5 * math.log(.5) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) return loss loss_output = self.execute(graph_fn, []) exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self): def graph_fn(): logit_scale = 100. 
prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=logit_scale) loss = loss_op(prediction_tensor, target_tensor, weights=weights) return loss # find softmax of the two prediction types above softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)] softmax_pred1 /= sum(softmax_pred1) softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)] softmax_pred2 /= sum(softmax_pred2) # compute the expected cross entropy for perfect matches exp_entropy1 = sum( [-x*np.log(x) for x in softmax_pred1]) exp_entropy2 = sum( [-x*np.log(x) for x in softmax_pred2]) # weighted expected losses exp_loss = np.matrix( [[exp_entropy1, exp_entropy1, exp_entropy2*.5, exp_entropy1], [exp_entropy2, exp_entropy1, exp_entropy1, 0.]]) loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) class BootstrappedSigmoidClassificationLossTest(test_case.TestCase): def testReturnsCorrectLossSoftBootstrapping(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, 0], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='soft') loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss loss_output = self.execute(graph_fn, []) exp_loss = -math.log(.5) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossHardBootstrapping(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, 0], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='hard') loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) return loss loss_output = self.execute(graph_fn, []) exp_loss = -math.log(.5) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): def graph_fn(): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], 
[0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='hard') loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss, axis=2) return loss loss_output = self.execute(graph_fn, []) exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) self.assertAllClose(loss_output, exp_loss) class HardExampleMinerTest(test_case.TestCase): def testHardMiningWithSingleLossType(self): def graph_fn(): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) # Uses only location loss to select hard examples loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='loc', cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) return loc_loss, cls_loss loc_loss_output, cls_loss_output = self.execute(graph_fn, []) exp_loc_loss = 100 + 3 exp_cls_loss = 0 + 0 self.assertAllClose(loc_loss_output, exp_loc_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testHardMiningWithBothLossType(self): def graph_fn(): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='both', cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) return loc_loss, cls_loss loc_loss_output, cls_loss_output = self.execute(graph_fn, []) exp_loc_loss = 80 + 0 exp_cls_loss = 50 + 9 self.assertAllClose(loc_loss_output, exp_loc_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testHardMiningNMS(self): def graph_fn(): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.9, 0.9, 0.99, 0.99], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) loss_op = losses.HardExampleMiner(num_hard_examples=2, iou_threshold=0.5, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) return loc_loss, cls_loss loc_loss_output, cls_loss_output = self.execute(graph_fn, []) exp_loc_loss = 0 + 90 + 0 + 1 exp_cls_loss = 110 + 10 + 9 + 6 self.assertAllClose(loc_loss_output, exp_loc_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testEnforceNegativesPerPositiveRatio(self): location_losses = np.array([[100, 90, 80, 0, 1, 2, 3, 10, 20, 
100, 20, 3]], np.float32) cls_losses = np.array([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], np.float32) box_corners = np.array([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], np.float32) match_results = np.array([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3], np.int32) max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10] exp_loc_loss_list = [80 + 2, 80 + 1 + 2, 80 + 1 + 2 + 10, 80 + 1 + 2 + 10 + 100, 80 + 1 + 2 + 10 + 100 + 20] exp_cls_loss_list = [100 + 70, 100 + 90 + 70, 100 + 90 + 70 + 60, 100 + 90 + 70 + 60 + 17, 100 + 90 + 70 + 60 + 17 + 13] # pylint: disable=cell-var-from-loop for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip( max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list): def graph_fn(): loss_op = losses.HardExampleMiner( num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=max_negatives_per_positive) match_list = [matcher.Match(tf.constant(match_results))] decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))] (loc_loss, cls_loss) = loss_op(tf.constant(location_losses), tf.constant(cls_losses), decoded_boxlist_list, match_list) return loc_loss, cls_loss loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, []) self.assertAllClose(loc_loss_output, exp_loc_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) # pylint: enable=cell-var-from-loop def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self): location_losses = np.array([[100, 90, 80, 0, 1, 2, 3, 10, 20, 100, 20, 3]], np.float32) cls_losses = np.array([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], np.float32) box_corners = np.array([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], np.float32) match_results = np.array([-1] * 12, np.int32) min_negatives_per_image_list = [0, 1, 2, 4, 5, 6] exp_loc_loss_list = [0, 80, 80 + 1, 80 + 1 + 2 + 10, 80 + 1 + 2 + 10 + 100, 80 + 1 + 2 + 10 + 100 + 20] exp_cls_loss_list = [0, 100, 100 + 90, 100 + 90 + 70 + 60, 100 + 90 + 70 + 60 + 17, 100 + 90 + 70 + 60 + 17 + 13] # pylint: disable=cell-var-from-loop for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip( min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list): def graph_fn(): loss_op = losses.HardExampleMiner( num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=3, min_negatives_per_image=min_negatives_per_image) match_list = [matcher.Match(tf.constant(match_results))] decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))] (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list, match_list) return loc_loss, cls_loss loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, []) self.assertAllClose(loc_loss_output, exp_loc_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) # pylint: enable=cell-var-from-loop LOG_2 = np.log(2) LOG_3 = np.log(3) class PenaltyReducedLogisticFocalLossTest(test_case.TestCase): """Testing loss function from Equation (1) in [1]. 
[1]: https://arxiv.org/abs/1904.07850 """ def setUp(self): super(PenaltyReducedLogisticFocalLossTest, self).setUp() self._prediction = np.array([ # First batch [[1 / 2, 1 / 4, 3 / 4], [3 / 4, 1 / 3, 1 / 3]], # Second Batch [[0.0, 1.0, 1 / 2], [3 / 4, 2 / 3, 1 / 3]]], np.float32) self._prediction = np.log(self._prediction/(1 - self._prediction)) self._target = np.array([ # First batch [[1.0, 0.91, 1.0], [0.36, 0.84, 1.0]], # Second Batch [[0.01, 1.0, 0.75], [0.96, 1.0, 1.0]]], np.float32) def test_returns_correct_loss(self): def graph_fn(prediction, target): weights = tf.constant([ [[1.0], [1.0]], [[1.0], [1.0]], ]) loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5) computed_value = loss._compute_loss(prediction, target, weights) return computed_value computed_value = self.execute(graph_fn, [self._prediction, self._target]) expected_value = np.array([ # First batch [[1 / 4 * LOG_2, 0.3 * 0.0625 * (2 * LOG_2 - LOG_3), 1 / 16 * (2 * LOG_2 - LOG_3)], [0.8 * 9 / 16 * 2 * LOG_2, 0.4 * 1 / 9 * (LOG_3 - LOG_2), 4 / 9 * LOG_3]], # Second Batch [[0.0, 0.0, 1 / 2 * 1 / 4 * LOG_2], [0.2 * 9 / 16 * 2 * LOG_2, 1 / 9 * (LOG_3 - LOG_2), 4 / 9 * LOG_3]]]) self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3) def test_returns_correct_loss_weighted(self): def graph_fn(prediction, target): weights = tf.constant([ [[1.0, 0.0, 1.0], [0.0, 0.0, 1.0]], [[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], ]) loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5) computed_value = loss._compute_loss(prediction, target, weights) return computed_value computed_value = self.execute(graph_fn, [self._prediction, self._target]) expected_value = np.array([ # First batch [[1 / 4 * LOG_2, 0.0, 1 / 16 * (2 * LOG_2 - LOG_3)], [0.0, 0.0, 4 / 9 * LOG_3]], # Second Batch [[0.0, 0.0, 1 / 2 * 1 / 4 * LOG_2], [0.0, 0.0, 0.0]]]) self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3) class L1LocalizationLossTest(test_case.TestCase): def test_returns_correct_loss(self): def graph_fn(): loss = losses.L1LocalizationLoss() pred = [[0.1, 0.2], [0.7, 0.5]] target = [[0.9, 1.0], [0.1, 0.4]] weights = [[1.0, 0.0], [1.0, 1.0]] return loss._compute_loss(pred, target, weights) computed_value = self.execute(graph_fn, []) self.assertAllClose(computed_value, [[0.8, 0.0], [0.6, 0.1]], rtol=1e-6) class WeightedDiceClassificationLoss(test_case.TestCase): def test_compute_weights_1(self): def graph_fn(): loss = losses.WeightedDiceClassificationLoss(squared_normalization=False) pred = np.zeros((2, 3, 4), dtype=np.float32) target = np.zeros((2, 3, 4), dtype=np.float32) pred[0, 1, 0] = _logit(0.9) pred[0, 2, 0] = _logit(0.1) pred[0, 2, 2] = _logit(0.5) pred[0, 1, 3] = _logit(0.1) pred[1, 2, 3] = _logit(0.2) pred[1, 1, 1] = _logit(0.3) pred[1, 0, 2] = _logit(0.1) target[0, 1, 0] = 1.0 target[0, 2, 2] = 1.0 target[0, 1, 3] = 1.0 target[1, 2, 3] = 1.0 target[1, 1, 1] = 0.0 target[1, 0, 2] = 0.0 weights = np.ones_like(target) return loss._compute_loss(pred, target, weights) dice_coeff = np.zeros((2, 4)) dice_coeff[0, 0] = 2 * 0.9 / 2.5 dice_coeff[0, 2] = 2 * 0.5 / 2.5 dice_coeff[0, 3] = 2 * 0.1 / 2.1 dice_coeff[1, 3] = 2 * 0.2 / 2.2 computed_value = self.execute(graph_fn, []) self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6) def test_compute_weights_set(self): def graph_fn(): loss = losses.WeightedDiceClassificationLoss(squared_normalization=False) pred = np.zeros((2, 3, 4), dtype=np.float32) target = np.zeros((2, 3, 4), dtype=np.float32) pred[0, 1, 0] = _logit(0.9) pred[0, 2, 0] = _logit(0.1) 
pred[0, 2, 2] = _logit(0.5) pred[0, 1, 3] = _logit(0.1) pred[1, 2, 3] = _logit(0.2) pred[1, 1, 1] = _logit(0.3) pred[1, 0, 2] = _logit(0.1) target[0, 1, 0] = 1.0 target[0, 2, 2] = 1.0 target[0, 1, 3] = 1.0 target[1, 2, 3] = 1.0 target[1, 1, 1] = 0.0 target[1, 0, 2] = 0.0 weights = np.ones_like(target) weights[:, :, 0] = 0.0 return loss._compute_loss(pred, target, weights) dice_coeff = np.zeros((2, 4)) dice_coeff[0, 2] = 2 * 0.5 / 2.5 dice_coeff[0, 3] = 2 * 0.1 / 2.1 dice_coeff[1, 3] = 2 * 0.2 / 2.2 computed_value = self.execute(graph_fn, []) self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6) def test_class_indices(self): def graph_fn(): loss = losses.WeightedDiceClassificationLoss(squared_normalization=False) pred = np.zeros((2, 3, 4), dtype=np.float32) target = np.zeros((2, 3, 4), dtype=np.float32) pred[0, 1, 0] = _logit(0.9) pred[0, 2, 0] = _logit(0.1) pred[0, 2, 2] = _logit(0.5) pred[0, 1, 3] = _logit(0.1) pred[1, 2, 3] = _logit(0.2) pred[1, 1, 1] = _logit(0.3) pred[1, 0, 2] = _logit(0.1) target[0, 1, 0] = 1.0 target[0, 2, 2] = 1.0 target[0, 1, 3] = 1.0 target[1, 2, 3] = 1.0 target[1, 1, 1] = 0.0 target[1, 0, 2] = 0.0 weights = np.ones_like(target) return loss._compute_loss(pred, target, weights, class_indices=[0]) dice_coeff = np.zeros((2, 4)) dice_coeff[0, 0] = 2 * 0.9 / 2.5 computed_value = self.execute(graph_fn, []) self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6) if __name__ == '__main__': tf.test.main()
69,447
43.603725
80
py
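For reference, a minimal NumPy sketch (independent of the repository code above) re-derives the order-of-magnitude ratios asserted in testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy; it assumes the standard focal-loss form FL(p_t) = (1 - p_t)**gamma * CE(p_t) with gamma=2 and alpha=None, which is what SigmoidFocalClassificationLoss in losses.py computes.

import numpy as np

# Predicted probabilities for the positive class, matching the logits the test
# builds via _logit(...): easy positives near 1.0 and easy negatives near 0.0.
probs = np.array([0.97, 0.91, 0.73, 0.27, 0.09, 0.03])
targets = np.array([1., 1., 1., 0., 0., 0.])

# Probability assigned to the true class.
p_t = targets * probs + (1 - targets) * (1 - probs)

sigmoid_xent = -np.log(p_t)            # plain sigmoid cross-entropy per example
focal = (1 - p_t) ** 2 * sigmoid_xent  # focal modulation, gamma=2, alpha=None

order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_xent / focal)))
print(order_of_ratio)  # [1000. 100. 10. 10. 100. 1000.], the values the test expects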
models
models-master/research/object_detection/core/losses.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classification and regression loss functions for object detection. Localization losses: * WeightedL2LocalizationLoss * WeightedSmoothL1LocalizationLoss * WeightedIOULocalizationLoss Classification losses: * WeightedSigmoidClassificationLoss * WeightedSoftmaxClassificationLoss * WeightedSoftmaxClassificationAgainstLogitsLoss * BootstrappedSigmoidClassificationLoss * WeightedDiceClassificationLoss """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.utils import ops from object_detection.utils import shape_utils class Loss(six.with_metaclass(abc.ABCMeta, object)): """Abstract base class for loss functions.""" def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=False, losses_mask=None, scope=None, **params): """Call the loss function. Args: prediction_tensor: an N-d tensor of shape [batch, anchors, ...] representing predicted quantities. target_tensor: an N-d tensor of shape [batch, anchors, ...] representing regression or classification targets. ignore_nan_targets: whether to ignore nan targets in the loss computation. E.g. can be used if the target tensor is missing groundtruth data that shouldn't be factored into the loss. losses_mask: A [batch] boolean tensor that indicates whether losses should be applied to individual images in the batch. For elements that are False, corresponding prediction, target, and weight tensors will not contribute to loss computation. If None, no filtering will take place prior to loss computation. scope: Op scope name. Defaults to 'Loss' if None. **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: a tensor representing the value of the loss function. 
""" with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope: if ignore_nan_targets: target_tensor = tf.where(tf.is_nan(target_tensor), prediction_tensor, target_tensor) if losses_mask is not None: tensor_multiplier = self._get_loss_multiplier_for_tensor( prediction_tensor, losses_mask) prediction_tensor *= tensor_multiplier target_tensor *= tensor_multiplier if 'weights' in params: params['weights'] = tf.convert_to_tensor(params['weights']) weights_multiplier = self._get_loss_multiplier_for_tensor( params['weights'], losses_mask) params['weights'] *= weights_multiplier return self._compute_loss(prediction_tensor, target_tensor, **params) def _get_loss_multiplier_for_tensor(self, tensor, losses_mask): loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1)) return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32) @abc.abstractmethod def _compute_loss(self, prediction_tensor, target_tensor, **params): """Method to be overridden by implementations. Args: prediction_tensor: a tensor representing predicted quantities target_tensor: a tensor representing regression or classification targets **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per anchor """ pass class WeightedL2LocalizationLoss(Loss): """L2 localization loss function with anchorwise output support. Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2 """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( weights, 2) square_diff = 0.5 * tf.square(weighted_diff) return tf.reduce_sum(square_diff, 2) class WeightedSmoothL1LocalizationLoss(Loss): """Smooth L1 localization loss function aka Huber Loss.. The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and delta * (|x|- 0.5*delta) otherwise, where x is the difference between predictions and target. See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015) """ def __init__(self, delta=1.0): """Constructor. Args: delta: delta for smooth L1 loss. """ super(WeightedSmoothL1LocalizationLoss, self).__init__() self._delta = delta def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ return tf.reduce_sum(tf.losses.huber_loss( target_tensor, prediction_tensor, delta=self._delta, weights=tf.expand_dims(weights, axis=2), loss_collection=None, reduction=tf.losses.Reduction.NONE ), axis=2) class WeightedIOULocalizationLoss(Loss): """IOU localization loss function. 
Sums the IOU for corresponding pairs of predicted/groundtruth boxes and for each pair assign a loss of 1 - IOU. We then compute a weighted sum over all pairs which is returned as the total loss. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded predicted boxes target_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded target boxes weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4])) target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4])) per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes, target_boxes) return tf.reshape(weights, [-1]) * per_anchor_iou_loss class WeightedGIOULocalizationLoss(Loss): """GIOU localization loss function. Sums the GIOU loss for corresponding pairs of predicted/groundtruth boxes and for each pair assign a loss of 1 - GIOU. We then compute a weighted sum over all pairs which is returned as the total loss. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded predicted boxes target_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded target boxes weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ batch_size, num_anchors, _ = shape_utils.combined_static_and_dynamic_shape( prediction_tensor) predicted_boxes = tf.reshape(prediction_tensor, [-1, 4]) target_boxes = tf.reshape(target_tensor, [-1, 4]) per_anchor_iou_loss = 1 - ops.giou(predicted_boxes, target_boxes) return tf.reshape(tf.reshape(weights, [-1]) * per_anchor_iou_loss, [batch_size, num_anchors]) class WeightedSigmoidClassificationLoss(Loss): """Sigmoid cross entropy classification loss function.""" def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class WeightedDiceClassificationLoss(Loss): """Dice loss for classification [1][2]. 
[1]: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient [2]: https://arxiv.org/abs/1606.04797 """ def __init__(self, squared_normalization, is_prediction_probability=False): """Initializes the loss object. Args: squared_normalization: boolean, if set, we square the probabilities in the denominator term used for normalization. is_prediction_probability: boolean, whether or not the input prediction_tensor represents a probability. If false, it is first converted to a probability by applying sigmoid. """ self._squared_normalization = squared_normalization self.is_prediction_probability = is_prediction_probability super(WeightedDiceClassificationLoss, self).__init__() def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Computes the loss value. Dice loss uses the area of the ground truth and prediction tensors for normalization. We compute area by summing along the anchors (2nd) dimension. Args: prediction_tensor: A float tensor of shape [batch_size, num_pixels, num_classes] representing the predicted logits for each class. num_pixels denotes the total number of pixels in the spatial dimensions of the mask after flattening. target_tensor: A float tensor of shape [batch_size, num_pixels, num_classes] representing one-hot encoded classification targets. num_pixels denotes the total number of pixels in the spatial dimensions of the mask after flattening. weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) if self.is_prediction_probability: prob_tensor = prediction_tensor else: prob_tensor = tf.nn.sigmoid(prediction_tensor) if self._squared_normalization: prob_tensor = tf.pow(prob_tensor, 2) target_tensor = tf.pow(target_tensor, 2) prob_tensor *= weights target_tensor *= weights prediction_area = tf.reduce_sum(prob_tensor, axis=1) gt_area = tf.reduce_sum(target_tensor, axis=1) intersection = tf.reduce_sum(prob_tensor * target_tensor, axis=1) dice_coeff = 2 * intersection / tf.maximum(gt_area + prediction_area, 1.0) dice_loss = 1 - dice_coeff return dice_loss class SigmoidFocalClassificationLoss(Loss): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. """ super(SigmoidFocalClassificationLoss, self).__init__() self._alpha = alpha self._gamma = gamma def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. 
Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) prediction_probabilities = tf.sigmoid(prediction_tensor) p_t = ((target_tensor * prediction_probabilities) + ((1 - target_tensor) * (1 - prediction_probabilities))) modulating_factor = 1.0 if self._gamma: modulating_factor = tf.pow(1.0 - p_t, self._gamma) alpha_weight_factor = 1.0 if self._alpha is not None: alpha_weight_factor = (target_tensor * self._alpha + (1 - target_tensor) * (1 - self._alpha)) focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * per_entry_cross_ent) return focal_cross_entropy_loss * weights class WeightedSoftmaxClassificationLoss(Loss): """Softmax loss function.""" def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the prediction is "diffused" and when this value is low, the prediction is made peakier. (default 1.0) """ super(WeightedSoftmaxClassificationLoss, self).__init__() self._logit_scale = logit_scale def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. """ weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] prediction_tensor = tf.divide( prediction_tensor, self._logit_scale, name='scale_logit') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss): """Softmax loss function against logits. Targets are expected to be provided in logits space instead of "one hot" or "probability distribution" space. """ def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the target is "diffused" and when this value is low, the target is made peakier. 
(default 1.0) """ super(WeightedSoftmaxClassificationAgainstLogitsLoss, self).__init__() self._logit_scale = logit_scale def _scale_and_softmax_logits(self, logits): """Scale logits then apply softmax.""" scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits') return tf.nn.softmax(scaled_logits, name='convert_scores') def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing logit classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. """ weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] target_tensor = self._scale_and_softmax_logits(target_tensor) prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, name='scale_logits') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class BootstrappedSigmoidClassificationLoss(Loss): """Bootstrapped sigmoid cross entropy classification loss function. This loss uses a convex combination of training labels and the current model's predictions as training targets in the classification loss. The idea is that as the model improves over time, its predictions can be trusted more and we can use these predictions to mitigate the damage of noisy/incorrect labels, because incorrect labels are likely to be eventually highly inconsistent with other stimuli predicted to have the same label by the model. In "soft" bootstrapping, we use all predicted class probabilities, whereas in "hard" bootstrapping, we use the single class favored by the model. See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by Reed et al. (ICLR 2015). """ def __init__(self, alpha, bootstrap_type='soft'): """Constructor. Args: alpha: a float32 scalar tensor between 0 and 1 representing interpolation weight bootstrap_type: set to either 'hard' or 'soft' (default) Raises: ValueError: if bootstrap_type is not either 'hard' or 'soft' """ super(BootstrappedSigmoidClassificationLoss, self).__init__() if bootstrap_type != 'hard' and bootstrap_type != 'soft': raise ValueError('Unrecognized bootstrap_type: must be one of ' '\'hard\' or \'soft.\'') self._alpha = alpha self._bootstrap_type = bootstrap_type def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. 
""" if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.cast( tf.sigmoid(prediction_tensor) > 0.5, tf.float32) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=bootstrap_target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class HardExampleMiner(object): """Hard example mining for regions in a list of images. Implements hard example mining to select a subset of regions to be back-propagated. For each image, selects the regions with highest losses, subject to the condition that a newly selected region cannot have an IOU > iou_threshold with any of the previously selected regions. This can be achieved by re-using a greedy non-maximum suppression algorithm. A constraint on the number of negatives mined per positive region can also be enforced. Reference papers: "Training Region-based Object Detectors with Online Hard Example Mining" (CVPR 2016) by Srivastava et al., and "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. """ def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0): """Constructor. The hard example mining implemented by this class can replicate the behavior in the two aforementioned papers (Srivastava et al., and Liu et al). To replicate the A2 paper (Srivastava et al), num_hard_examples is set to a fixed parameter (64 by default) and iou_threshold is set to .7 for running non-max-suppression the predicted boxes prior to hard mining. In order to replicate the SSD paper (Liu et al), num_hard_examples should be set to None, max_negatives_per_positive should be 3 and iou_threshold should be 1.0 (in order to effectively turn off NMS). Args: num_hard_examples: maximum number of hard examples to be selected per image (prior to enforcing max negative to positive ratio constraint). If set to None, all examples obtained after NMS are considered. iou_threshold: minimum intersection over union for an example to be discarded during NMS. loss_type: use only classification losses ('cls', default), localization losses ('loc') or both losses ('both'). In the last case, cls_loss_weight and loc_loss_weight are used to compute weighted sum of the two losses. cls_loss_weight: weight for classification loss. loc_loss_weight: weight for location loss. max_negatives_per_positive: maximum number of negatives to retain for each positive anchor. By default, num_negatives_per_positive is None, which means that we do not enforce a prespecified negative:positive ratio. Note also that num_negatives_per_positives can be a float (and will be converted to be a float even if it is passed in otherwise). min_negatives_per_image: minimum number of negative anchors to sample for a given image. Setting this to a positive number allows sampling negatives in an image without any positive anchors and thus not biased towards at least one detection per image. 
""" self._num_hard_examples = num_hard_examples self._iou_threshold = iou_threshold self._loss_type = loss_type self._cls_loss_weight = cls_loss_weight self._loc_loss_weight = loc_loss_weight self._max_negatives_per_positive = max_negatives_per_positive self._min_negatives_per_image = min_negatives_per_image if self._max_negatives_per_positive is not None: self._max_negatives_per_positive = float(self._max_negatives_per_positive) self._num_positives_list = None self._num_negatives_list = None def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None): """Computes localization and classification losses after hard mining. Args: location_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise localization losses. cls_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise classification losses. decoded_boxlist_list: a list of decoded BoxList representing location predictions for each image. match_list: an optional list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. Match objects in match_list are used to reference which anchors are positive, negative or ignored. If self._max_negatives_per_positive exists, these are then used to enforce a prespecified negative to positive ratio. Returns: mined_location_loss: a float scalar with sum of localization losses from selected hard examples. mined_cls_loss: a float scalar with sum of classification losses from selected hard examples. Raises: ValueError: if location_losses, cls_losses and decoded_boxlist_list do not have compatible shapes (i.e., they must correspond to the same number of images). ValueError: if match_list is specified but its length does not match len(decoded_boxlist_list). 
""" mined_location_losses = [] mined_cls_losses = [] location_losses = tf.unstack(location_losses) cls_losses = tf.unstack(cls_losses) num_images = len(decoded_boxlist_list) if not match_list: match_list = num_images * [None] if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' 'do not have compatible shapes.') if not isinstance(match_list, list): raise ValueError('match_list must be a list.') if len(match_list) != len(decoded_boxlist_list): raise ValueError('match_list must either be None or have ' 'length=len(decoded_boxlist_list).') num_positives_list = [] num_negatives_list = [] for ind, detection_boxlist in enumerate(decoded_boxlist_list): box_locations = detection_boxlist.get() match = match_list[ind] image_losses = cls_losses[ind] if self._loss_type == 'loc': image_losses = location_losses[ind] elif self._loss_type == 'both': image_losses *= self._cls_loss_weight image_losses += location_losses[ind] * self._loc_loss_weight if self._num_hard_examples is not None: num_hard_examples = self._num_hard_examples else: num_hard_examples = detection_boxlist.num_boxes() selected_indices = tf.image.non_max_suppression( box_locations, image_losses, num_hard_examples, self._iou_threshold) if self._max_negatives_per_positive is not None and match: (selected_indices, num_positives, num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( selected_indices, match, self._max_negatives_per_positive, self._min_negatives_per_image) num_positives_list.append(num_positives) num_negatives_list.append(num_negatives) mined_location_losses.append( tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) mined_cls_losses.append( tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) if match and self._max_negatives_per_positive: self._num_positives_list = num_positives_list self._num_negatives_list = num_negatives_list return (location_loss, cls_loss) def summarize(self): """Summarize the number of positives and negatives after mining.""" if self._num_positives_list and self._num_negatives_list: avg_num_positives = tf.reduce_mean( tf.cast(self._num_positives_list, dtype=tf.float32)) avg_num_negatives = tf.reduce_mean( tf.cast(self._num_negatives_list, dtype=tf.float32)) tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0): """Subsample a collection of selected indices to a desired neg:pos ratio. This function takes a subset of M indices (indexing into a large anchor collection of N anchors where M<N) which are labeled as positive/negative via a Match object (matched indices are positive, unmatched indices are negative). It returns a subset of the provided indices retaining all positives as well as up to the first K negatives, where: K=floor(num_negative_per_positive * num_positives). For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors), with positives=[2, 5] and negatives=[4, 7, 9, 10] and num_negatives_per_positive=1, then the returned subset of indices is [2, 4, 5, 7]. 
Args: indices: An integer tensor of shape [M] representing a collection of selected anchor indices match: A matcher.Match object encoding the match between anchors and groundtruth boxes for a given image, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. max_negatives_per_positive: (float) maximum number of negatives for each positive anchor. min_negatives_per_image: minimum number of negative anchors for a given image. Allow sampling negatives in image without any positive anchors. Returns: selected_indices: An integer tensor of shape [M'] representing a collection of selected anchor indices with M' <= M. num_positives: An integer tensor representing the number of positive examples in selected set of indices. num_negatives: An integer tensor representing the number of negative examples in selected set of indices. """ positives_indicator = tf.gather(match.matched_column_indicator(), indices) negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices) num_positives = tf.reduce_sum(tf.cast(positives_indicator, dtype=tf.int32)) max_negatives = tf.maximum( min_negatives_per_image, tf.cast(max_negatives_per_positive * tf.cast(num_positives, dtype=tf.float32), dtype=tf.int32)) topk_negatives_indicator = tf.less_equal( tf.cumsum(tf.cast(negatives_indicator, dtype=tf.int32)), max_negatives) subsampled_selection_indices = tf.where( tf.logical_or(positives_indicator, topk_negatives_indicator)) num_negatives = tf.size(subsampled_selection_indices) - num_positives return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]), num_positives, num_negatives) class PenaltyReducedLogisticFocalLoss(Loss): """Penalty-reduced pixelwise logistic regression with focal loss. The loss is defined in Equation (1) of the Objects as Points[1] paper. Although the loss is defined per-pixel in the output space, this class assumes that each pixel is an anchor to be compatible with the base class. [1]: https://arxiv.org/abs/1904.07850 """ def __init__(self, alpha=2.0, beta=4.0, sigmoid_clip_value=1e-4): """Constructor. Args: alpha: Focussing parameter of the focal loss. Increasing this will decrease the loss contribution of the well classified examples. beta: The local penalty reduction factor. Increasing this will decrease the contribution of loss due to negative pixels near the keypoint. sigmoid_clip_value: The sigmoid operation used internally will be clipped between [sigmoid_clip_value, 1 - sigmoid_clip_value) """ self._alpha = alpha self._beta = beta self._sigmoid_clip_value = sigmoid_clip_value super(PenaltyReducedLogisticFocalLoss, self).__init__() def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. In all input tensors, `num_anchors` is the total number of pixels in the the output space. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted unscaled logits for each class. The function will compute sigmoid on this tensor internally. target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing a tensor with the 'splatted' keypoints, possibly using a gaussian kernel. This function assumes that the target is bounded between [0, 1]. weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. 
Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ is_present_tensor = tf.math.equal(target_tensor, 1.0) prediction_tensor = tf.clip_by_value(tf.sigmoid(prediction_tensor), self._sigmoid_clip_value, 1 - self._sigmoid_clip_value) positive_loss = (tf.math.pow((1 - prediction_tensor), self._alpha)* tf.math.log(prediction_tensor)) negative_loss = (tf.math.pow((1 - target_tensor), self._beta)* tf.math.pow(prediction_tensor, self._alpha)* tf.math.log(1 - prediction_tensor)) loss = -tf.where(is_present_tensor, positive_loss, negative_loss) return loss * weights class L1LocalizationLoss(Loss): """L1 loss or absolute difference. When used in a per-pixel manner, each pixel should be given as an anchor. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ return tf.losses.absolute_difference( target_tensor, prediction_tensor, weights=weights, loss_collection=None, reduction=tf.losses.Reduction.NONE )
38,050
41.753933
80
py
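The hard-example-mining code in the record above subsamples the selected indices to a desired negative:positive ratio: all positives are kept, plus only the first floor(max_negatives_per_positive * num_positives) negatives. For illustration only, here is a minimal NumPy sketch of that idea (the function name subsample_to_neg_pos_ratio and the plain Python set of positive indices are assumptions of this sketch, not part of the file above); it reproduces the worked example from the docstring.

import numpy as np

def subsample_to_neg_pos_ratio(indices, positive_set,
                               max_negatives_per_positive,
                               min_negatives_per_image=0):
  # Keep every positive and only the first
  # floor(max_negatives_per_positive * num_positives) negatives, in order.
  indices = np.asarray(indices)
  is_positive = np.isin(indices, list(positive_set))
  num_positives = int(is_positive.sum())
  max_negatives = max(min_negatives_per_image,
                      int(max_negatives_per_positive * num_positives))
  neg_rank = np.cumsum(~is_positive)  # running count of negatives seen so far
  keep = is_positive | (neg_rank <= max_negatives)
  num_negatives = int(keep.sum()) - num_positives
  return indices[keep], num_positives, num_negatives

# Matches the docstring example: positives {2, 5}, one negative per positive.
print(subsample_to_neg_pos_ratio([2, 4, 5, 7, 9, 10], {2, 5}, 1.0))
# -> (array([2, 4, 5, 7]), 2, 2)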
models
models-master/research/object_detection/core/keypoint_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keypoint operations. Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2], where the last dimension holds rank 2 tensors of the form [y, x] representing the coordinates of the keypoint. """ import numpy as np import tensorflow.compat.v1 as tf from object_detection.utils import shape_utils def scale(keypoints, y_scale, x_scale, scope=None): """Scales keypoint coordinates in x and y dimensions. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) new_keypoints = keypoints * [[[y_scale, x_scale]]] return new_keypoints def clip_to_window(keypoints, window, scope=None): """Clips keypoints to a window. This op clips any input keypoints to a window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip the keypoints. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'ClipToWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) new_keypoints = tf.concat([y, x], 2) return new_keypoints def prune_outside_window(keypoints, window, scope=None): """Prunes keypoints that fall outside a given window. This function replaces keypoints that fall outside the given window with nan. See also clip_to_window which clips any keypoints that fall outside the given window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window outside of which the op should prune the keypoints. scope: name scope. 
Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'PruneOutsideWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) valid_indices = tf.logical_and( tf.logical_and(y >= win_y_min, y <= win_y_max), tf.logical_and(x >= win_x_min, x <= win_x_max)) new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y)) new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x)) new_keypoints = tf.concat([new_y, new_x], 2) return new_keypoints def change_coordinate_frame(keypoints, window, scope=None): """Changes coordinate frame of the keypoints to be relative to window's frame. Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint coordinates from keypoints of shape [num_instances, num_keypoints, 2] to be relative to this window. An example use case is data augmentation: where we are given groundtruth keypoints and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each groundtruth keypoint to be relative to this new window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, 1.0 / win_width) return new_keypoints def keypoints_to_enclosing_bounding_boxes(keypoints, keypoints_axis=1): """Creates enclosing bounding boxes from keypoints. Args: keypoints: a [num_instances, num_keypoints, 2] float32 tensor with keypoints in [y, x] format. keypoints_axis: An integer indicating the axis that correspond to the keypoint dimension. Returns: A [num_instances, 4] float32 tensor that tightly covers all the keypoints for each instance. """ ymin = tf.math.reduce_min(keypoints[..., 0], axis=keypoints_axis) xmin = tf.math.reduce_min(keypoints[..., 1], axis=keypoints_axis) ymax = tf.math.reduce_max(keypoints[..., 0], axis=keypoints_axis) xmax = tf.math.reduce_max(keypoints[..., 1], axis=keypoints_axis) return tf.stack([ymin, xmin, ymax, xmax], axis=keypoints_axis) def to_normalized_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts absolute keypoint coordinates to normalized coordinates in [0, 1]. Usually one uses the dynamic shape of the image or conv-layer tensor: keypoints = keypoint_ops.to_normalized_coordinates(keypoints, tf.shape(images)[1], tf.shape(images)[2]), This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2]. height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with normalized coordinates in [0, 1]. 
""" with tf.name_scope(scope, 'ToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(keypoints) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(keypoints, 1.0 / height, 1.0 / width) def to_absolute_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts normalized keypoint coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum keypoint coordinate value is larger than 1.01 (in which case coordinates are already absolute). Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2] height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates in terms of the image size. """ with tf.name_scope(scope, 'ToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) # Ensure range of input keypoints is correct. if check_range: max_val = tf.reduce_max(keypoints) max_assert = tf.Assert(tf.greater_equal(1.01, max_val), ['maximum keypoint coordinate value is larger ' 'than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(keypoints, height, width) def flip_horizontal(keypoints, flip_point, flip_permutation=None, scope=None): """Flips the keypoints horizontally around the flip_point. This operation flips the x coordinate for each keypoint around the flip_point and also permutes the keypoints in a manner specified by flip_permutation. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] flip_point: (float) scalar tensor representing the x coordinate to flip the keypoints around. flip_permutation: integer list or rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the flipped keypoint indices. This is used primarily for keypoints that are not reflection invariant. E.g. Suppose there are 3 keypoints representing ['head', 'right_eye', 'left_eye'], then a logical choice for flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' and 'right_eye' after a horizontal flip. Default to None or empty list to keep the original order after flip. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'FlipHorizontal'): keypoints = tf.transpose(keypoints, [1, 0, 2]) if flip_permutation: keypoints = tf.gather(keypoints, flip_permutation) v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) u = flip_point * 2.0 - u new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def flip_vertical(keypoints, flip_point, flip_permutation=None, scope=None): """Flips the keypoints vertically around the flip_point. This operation flips the y coordinate for each keypoint around the flip_point and also permutes the keypoints in a manner specified by flip_permutation. 
Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] flip_point: (float) scalar tensor representing the y coordinate to flip the keypoints around. flip_permutation: integer list or rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the flipped keypoint indices. This is used primarily for keypoints that are not reflection invariant. E.g. Suppose there are 3 keypoints representing ['head', 'right_eye', 'left_eye'], then a logical choice for flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' and 'right_eye' after a horizontal flip. Default to None or empty list to keep the original order after flip. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'FlipVertical'): keypoints = tf.transpose(keypoints, [1, 0, 2]) if flip_permutation: keypoints = tf.gather(keypoints, flip_permutation) v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) v = flip_point * 2.0 - v new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def rot90(keypoints, rotation_permutation=None, scope=None): """Rotates the keypoints counter-clockwise by 90 degrees. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] rotation_permutation: integer list or rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the rotated keypoint indices. This is used primarily for keypoints that are not rotation invariant. Default to None or empty list to keep the original order after rotation. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ keypoints.get_shape().assert_has_rank(3) with tf.name_scope(scope, 'Rot90'): keypoints = tf.transpose(keypoints, [1, 0, 2]) if rotation_permutation: keypoints = tf.gather(keypoints, rotation_permutation) v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2) v = 1.0 - v new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def keypoint_weights_from_visibilities(keypoint_visibilities, per_keypoint_weights=None): """Returns a keypoint weights tensor. During training, it is often beneficial to consider only those keypoints that are labeled. This function returns a weights tensor that combines default per-keypoint weights, as well as the visibilities of individual keypoints. The returned tensor satisfies: keypoint_weights[i, k] = per_keypoint_weights[k] * keypoint_visibilities[i, k] where per_keypoint_weights[k] is set to 1 if not provided. Args: keypoint_visibilities: A [num_instances, num_keypoints] boolean tensor indicating whether a keypoint is labeled (and perhaps even visible). per_keypoint_weights: A list or 1-d tensor of length `num_keypoints` with per-keypoint weights. If None, will use 1 for each visible keypoint weight. Returns: A [num_instances, num_keypoints] float32 tensor with keypoint weights. Those keypoints deemed visible will have the provided per-keypoint weight, and all others will be set to zero. 
""" keypoint_visibilities.get_shape().assert_has_rank(2) if per_keypoint_weights is None: num_keypoints = shape_utils.combined_static_and_dynamic_shape( keypoint_visibilities)[1] per_keypoint_weight_mult = tf.ones((1, num_keypoints,), dtype=tf.float32) else: per_keypoint_weight_mult = tf.expand_dims(per_keypoint_weights, axis=0) return per_keypoint_weight_mult * tf.cast(keypoint_visibilities, tf.float32) def set_keypoint_visibilities(keypoints, initial_keypoint_visibilities=None): """Sets keypoint visibilities based on valid/invalid keypoints. Some keypoint operations set invisible keypoints (e.g. cropped keypoints) to NaN, without affecting any keypoint "visibility" variables. This function is used to update (or create) keypoint visibilities to agree with visible / invisible keypoint coordinates. Args: keypoints: a float32 tensor of shape [num_instances, num_keypoints, 2]. initial_keypoint_visibilities: a boolean tensor of shape [num_instances, num_keypoints]. If provided, will maintain the visibility designation of a keypoint, so long as the corresponding coordinates are not NaN. If not provided, will create keypoint visibilities directly from the values in `keypoints` (i.e. NaN coordinates map to False, otherwise they map to True). Returns: keypoint_visibilities: a bool tensor of shape [num_instances, num_keypoints] indicating whether a keypoint is visible or not. """ keypoints.get_shape().assert_has_rank(3) if initial_keypoint_visibilities is not None: keypoint_visibilities = tf.cast(initial_keypoint_visibilities, tf.bool) else: keypoint_visibilities = tf.ones_like(keypoints[:, :, 0], dtype=tf.bool) keypoints_with_nan = tf.math.reduce_any(tf.math.is_nan(keypoints), axis=2) keypoint_visibilities = tf.where( keypoints_with_nan, tf.zeros_like(keypoint_visibilities, dtype=tf.bool), keypoint_visibilities) return keypoint_visibilities
16,151
40.309463
80
py
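The keypoint_ops record above describes change_coordinate_frame: keypoints are shifted by the window's top-left corner and rescaled by the window's height and width. A small NumPy sketch of that arithmetic, for illustration only (change_coordinate_frame_np is a name invented here; it assumes keypoints of shape [num_instances, num_keypoints, 2] in [y, x] order, as in the file):

import numpy as np

def change_coordinate_frame_np(keypoints, window):
  # keypoints: [num_instances, num_keypoints, 2] in [y, x] order.
  # window: [y_min, x_min, y_max, x_max] in the same (normalized) frame.
  y_min, x_min, y_max, x_max = window
  shifted = keypoints - np.array([y_min, x_min])
  return shifted / np.array([y_max - y_min, x_max - x_min])

keypoints = np.array([[[0.25, 0.5], [0.75, 0.75]]])
print(change_coordinate_frame_np(keypoints, [0.25, 0.25, 0.75, 0.75]))
# -> [[[0.  0.5]
#      [1.  1. ]]]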
models
models-master/research/object_detection/core/minibatch_sampler.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base minibatch sampler module. The job of the minibatch_sampler is to subsample a minibatch based on some criterion. The main function call is: subsample(indicator, batch_size, **params). Indicator is a 1d boolean tensor where True denotes which examples can be sampled. It returns a boolean indicator where True denotes an example has been sampled.. Subclasses should implement the Subsample function and can make use of the @staticmethod SubsampleIndicator. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from abc import abstractmethod import six import tensorflow.compat.v1 as tf from object_detection.utils import ops class MinibatchSampler(six.with_metaclass(ABCMeta, object)): """Abstract base class for subsampling minibatches.""" def __init__(self): """Constructs a minibatch sampler.""" pass @abstractmethod def subsample(self, indicator, batch_size, **params): """Returns subsample of entries in indicator. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. **params: additional keyword arguments for specific implementations of the MinibatchSampler. Returns: sample_indicator: boolean tensor of shape [N] whose True entries have been sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size """ pass @staticmethod def subsample_indicator(indicator, num_samples): """Subsample indicator vector. Given a boolean indicator vector with M elements set to `True`, the function assigns all but `num_samples` of these previously `True` elements to `False`. If `num_samples` is greater than M, the original indicator vector is returned. Args: indicator: a 1-dimensional boolean tensor indicating which elements are allowed to be sampled and which are not. num_samples: int32 scalar tensor Returns: a boolean tensor with the same shape as input (indicator) tensor """ indices = tf.where(indicator) indices = tf.random_shuffle(indices) indices = tf.reshape(indices, [-1]) num_samples = tf.minimum(tf.size(indices), num_samples) selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0]) return tf.equal(selected_indicator, 1)
3,231
33.021053
80
py
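subsample_indicator in the record above randomly keeps at most num_samples of the True entries of a boolean indicator. A rough NumPy equivalent, for illustration only (the helper name and the seeded default_rng are assumptions of this sketch, not part of the library):

import numpy as np

def subsample_indicator_np(indicator, num_samples, seed=0):
  # Randomly keep at most `num_samples` of the True entries of `indicator`.
  rng = np.random.default_rng(seed)
  candidates = np.flatnonzero(indicator)
  rng.shuffle(candidates)
  selected = candidates[:num_samples]
  sampled = np.zeros_like(indicator, dtype=bool)
  sampled[selected] = True
  return sampled

indicator = np.array([True, False, True, True, False])
print(subsample_indicator_np(indicator, 2).sum())  # 2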
models
models-master/research/object_detection/core/box_coder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_coder."""
import tensorflow.compat.v1 as tf

from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.utils import test_case


class MockBoxCoder(box_coder.BoxCoder):
  """Test BoxCoder that encodes/decodes using the multiply-by-two function."""

  def code_size(self):
    return 4

  def _encode(self, boxes, anchors):
    return 2.0 * boxes.get()

  def _decode(self, rel_codes, anchors):
    return box_list.BoxList(rel_codes / 2.0)


class BoxCoderTest(test_case.TestCase):

  def test_batch_decode(self):
    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]

    def graph_fn():
      mock_anchor_corners = tf.constant(
          [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
      mock_anchors = box_list.BoxList(mock_anchor_corners)
      mock_box_coder = MockBoxCoder()
      encoded_boxes_list = [mock_box_coder.encode(
          box_list.BoxList(tf.constant(boxes)), mock_anchors)
                            for boxes in expected_boxes]
      encoded_boxes = tf.stack(encoded_boxes_list)
      decoded_boxes = box_coder.batch_decode(
          encoded_boxes, mock_box_coder, mock_anchors)
      return decoded_boxes

    decoded_boxes_result = self.execute(graph_fn, [])
    self.assertAllClose(expected_boxes, decoded_boxes_result)


if __name__ == '__main__':
  tf.test.main()
2,166
33.396825
80
py
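The test above relies on the property that decoding the encoded boxes recovers the originals. A tiny standalone sketch of that round trip with the same multiply-by-two mock transform, for illustration only (plain NumPy, names invented here; the anchors argument is unused, just as in the mock coder):

import numpy as np

def mock_encode(boxes, anchors):
  return 2.0 * boxes  # the mock coder ignores the anchors

def mock_decode(rel_codes, anchors):
  return rel_codes / 2.0

boxes = np.array([[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]])
anchors = np.array([[0.0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]])
assert np.allclose(mock_decode(mock_encode(boxes, anchors), anchors), boxes)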
models
models-master/research/object_detection/core/anchor_generator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base anchor generator. The job of the anchor generator is to create (or load) a collection of bounding boxes to be used as anchors. Generated anchors are assumed to match some convolutional grid or list of grid shapes. For example, we might want to generate anchors matching an 8x8 feature map and a 4x4 feature map. If we place 3 anchors per grid location on the first feature map and 6 anchors per grid location on the second feature map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total. To support fully convolutional settings, feature map shapes are passed dynamically at generation time. The number of anchors to place at each location is static --- implementations of AnchorGenerator must always be able return the number of anchors that it uses per location for each feature map. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from abc import abstractmethod import six import tensorflow.compat.v1 as tf class AnchorGenerator(six.with_metaclass(ABCMeta, object)): """Abstract base class for anchor generators.""" @abstractmethod def name_scope(self): """Name scope. Must be defined by implementations. Returns: a string representing the name scope of the anchor generation operation. """ pass @property def check_num_anchors(self): """Whether to dynamically check the number of anchors generated. Can be overridden by implementations that would like to disable this behavior. Returns: a boolean controlling whether the Generate function should dynamically check the number of anchors generated against the mathematically expected number of anchors. """ return True @abstractmethod def num_anchors_per_location(self): """Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function. """ pass def generate(self, feature_map_shape_list, **params): """Generates a collection of bounding boxes to be used as anchors. TODO(rathodv): remove **params from argument list and make stride and offsets (for multiple_grid_anchor_generator) constructor arguments. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Pairs can be provided as 1-dimensional integer tensors of length 2 or simply as tuples of integers. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxLists each holding anchor boxes corresponding to the input feature map shapes. Raises: ValueError: if the number of feature map shapes does not match the length of NumAnchorsPerLocation. 
""" if self.check_num_anchors and ( len(feature_map_shape_list) != len(self.num_anchors_per_location())): raise ValueError('Number of feature maps is expected to equal the length ' 'of `num_anchors_per_location`.') with tf.name_scope(self.name_scope()): anchors_list = self._generate(feature_map_shape_list, **params) if self.check_num_anchors: for item in anchors_list: item.set(tf.identity(item.get())) return anchors_list @abstractmethod def _generate(self, feature_map_shape_list, **params): """To be overridden by implementations. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxList, each holding a collection of N anchor boxes. """ pass def anchor_index_to_feature_map_index(self, boxlist_list): """Returns a 1-D array of feature map indices for each anchor. Args: boxlist_list: a list of Boxlist, each holding a collection of N anchor boxes. This list is produced in self.generate(). Returns: A [num_anchors] integer array, where each element indicates which feature map index the anchor belongs to. """ feature_map_indices_list = [] for i, boxes in enumerate(boxlist_list): feature_map_indices_list.append( i * tf.ones([boxes.num_boxes()], dtype=tf.int32)) return tf.concat(feature_map_indices_list, axis=0)
5,245
34.931507
80
py
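The anchor generator record above works through the anchor-count arithmetic (3 anchors per location on an 8x8 map plus 6 per location on a 4x4 map gives 288) and describes anchor_index_to_feature_map_index. A NumPy sketch of both, for illustration only (variable names are invented here):

import numpy as np

feature_map_shapes = [(8, 8), (4, 4)]
anchors_per_location = [3, 6]
counts = [a * h * w
          for a, (h, w) in zip(anchors_per_location, feature_map_shapes)]
print(sum(counts))  # 3*8*8 + 6*4*4 = 288

# One feature-map id per anchor, analogous to anchor_index_to_feature_map_index.
feature_map_index = np.concatenate(
    [np.full(n, i, dtype=np.int32) for i, n in enumerate(counts)])
print(feature_map_index.shape)  # (288,)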
models
models-master/research/object_detection/core/batcher.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides functions to batch a dictionary of input tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import prefetcher rt_shape_str = '_runtime_shapes' class BatchQueue(object): """BatchQueue class. This class creates a batch queue to asynchronously enqueue tensors_dict. It also adds a FIFO prefetcher so that the batches are readily available for the consumers. Dequeue ops for a BatchQueue object can be created via the Dequeue method which evaluates to a batch of tensor_dict. Example input pipeline with batching: ------------------------------------ key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) batch_queue = batcher.BatchQueue(tensor_dict, batch_size=32, batch_queue_capacity=2000, num_batch_queue_threads=8, prefetch_queue_capacity=20) tensor_dict = batch_queue.dequeue() outputs = Model(tensor_dict) ... ----------------------------------- Notes: ----- This class batches tensors of unequal sizes by zero padding and unpadding them after generating a batch. This can be computationally expensive when batching tensors (such as images) that are of vastly different sizes. So it is recommended that the shapes of such tensors be fully defined in tensor_dict while other lightweight tensors such as bounding box corners and class labels can be of varying sizes. Use either crop or resize operations to fully define the shape of an image in tensor_dict. It is also recommended to perform any preprocessing operations on tensors before passing to BatchQueue and subsequently calling the Dequeue method. Another caveat is that this class does not read the last batch if it is not full. The current implementation makes it hard to support that use case. So, for evaluation, when it is critical to run all the examples through your network use the input pipeline example mentioned in core/prefetcher.py. """ def __init__(self, tensor_dict, batch_size, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity): """Constructs a batch queue holding tensor_dict. Args: tensor_dict: dictionary of tensors to batch. batch_size: batch size. batch_queue_capacity: max capacity of the queue from which the tensors are batched. num_batch_queue_threads: number of threads to use for batching. prefetch_queue_capacity: max capacity of the queue used to prefetch assembled batches. """ # Remember static shapes to set shapes of batched tensors. static_shapes = collections.OrderedDict( {key: tensor.get_shape() for key, tensor in tensor_dict.items()}) # Remember runtime shapes to unpad tensors after batching. 
runtime_shapes = collections.OrderedDict( {(key + rt_shape_str): tf.shape(tensor) for key, tensor in tensor_dict.items()}) all_tensors = tensor_dict all_tensors.update(runtime_shapes) batched_tensors = tf.train.batch( all_tensors, capacity=batch_queue_capacity, batch_size=batch_size, dynamic_pad=True, num_threads=num_batch_queue_threads) self._queue = prefetcher.prefetch(batched_tensors, prefetch_queue_capacity) self._static_shapes = static_shapes self._batch_size = batch_size def dequeue(self): """Dequeues a batch of tensor_dict from the BatchQueue. TODO: use allow_smaller_final_batch to allow running over the whole eval set Returns: A list of tensor_dicts of the requested batch_size. """ batched_tensors = self._queue.dequeue() # Separate input tensors from tensors containing their runtime shapes. tensors = {} shapes = {} for key, batched_tensor in batched_tensors.items(): unbatched_tensor_list = tf.unstack(batched_tensor) for i, unbatched_tensor in enumerate(unbatched_tensor_list): if rt_shape_str in key: shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor else: tensors[(key, i)] = unbatched_tensor # Undo that padding using shapes and create a list of size `batch_size` that # contains tensor dictionaries. tensor_dict_list = [] batch_size = self._batch_size for batch_id in range(batch_size): tensor_dict = {} for key in self._static_shapes: tensor_dict[key] = tf.slice(tensors[(key, batch_id)], tf.zeros_like(shapes[(key, batch_id)]), shapes[(key, batch_id)]) tensor_dict[key].set_shape(self._static_shapes[key]) tensor_dict_list.append(tensor_dict) return tensor_dict_list
5,765
39.605634
80
py
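BatchQueue above batches unequal-sized tensors by zero padding them and remembering their runtime shapes so each example can be sliced back out after dequeue. A NumPy sketch of that pad-then-unpad idea, for illustration only (the 2-D arrays and variable names are assumptions of this sketch):

import numpy as np

examples = [np.ones((2, 3)), np.ones((4, 1))]
runtime_shapes = [np.array(e.shape) for e in examples]
pad_to = np.max(runtime_shapes, axis=0)  # common padded shape, here (4, 3)

batched = np.stack([
    np.pad(e, [(0, pad_to[0] - e.shape[0]), (0, pad_to[1] - e.shape[1])])
    for e in examples])

# Undo the padding with the recorded shapes, as dequeue() does with tf.slice.
unpadded = [batched[i][:s[0], :s[1]] for i, s in enumerate(runtime_shapes)]
assert all(np.array_equal(u, e) for u, e in zip(unpadded, examples))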
models
models-master/research/object_detection/core/region_similarity_calculator_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for region_similarity_calculator.""" import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import region_similarity_calculator from object_detection.core import standard_fields as fields from object_detection.utils import test_case class RegionSimilarityCalculatorTest(test_case.TestCase): def test_get_correct_pairwise_similarity_based_on_iou(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) iou_similarity_calculator = region_similarity_calculator.IouSimilarity() iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) return iou_similarity exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] iou_output = self.execute(graph_fn, []) self.assertAllClose(iou_output, exp_output) def test_get_correct_pairwise_similarity_based_on_squared_distances(self): def graph_fn(): corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 2.0]]) corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 3.0], [0.0, 0.0, 0.0, 0.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) dist_similarity_calc = region_similarity_calculator.NegSqDistSimilarity() dist_similarity = dist_similarity_calc.compare(boxes1, boxes2) return dist_similarity exp_output = [[-26, -25, 0], [-18, -27, -6]] iou_output = self.execute(graph_fn, []) self.assertAllClose(iou_output, exp_output) def test_get_correct_pairwise_similarity_based_on_ioa(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) ioa_similarity_calculator = region_similarity_calculator.IoaSimilarity() ioa_similarity_1 = ioa_similarity_calculator.compare(boxes1, boxes2) ioa_similarity_2 = ioa_similarity_calculator.compare(boxes2, boxes1) return ioa_similarity_1, ioa_similarity_2 exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], [1.0 / 12.0, 0.0, 5.0 / 400.0]] exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], [0, 0], [6.0 / 6.0, 5.0 / 5.0]] iou_output_1, iou_output_2 = self.execute(graph_fn, []) self.assertAllClose(iou_output_1, exp_output_1) self.assertAllClose(iou_output_2, exp_output_2) def test_get_correct_pairwise_similarity_based_on_thresholded_iou(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) scores = tf.constant([.3, .6]) iou_threshold = .013 boxes1 = box_list.BoxList(corners1) boxes1.add_field(fields.BoxListFields.scores, scores) 
boxes2 = box_list.BoxList(corners2) iou_similarity_calculator = ( region_similarity_calculator.ThresholdedIouSimilarity( iou_threshold=iou_threshold)) iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) return iou_similarity exp_output = tf.constant([[0.3, 0., 0.3], [0.6, 0., 0.]]) iou_output = self.execute(graph_fn, []) self.assertAllClose(iou_output, exp_output) def test_detr_similarity(self): def graph_fn(): corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]]) corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]]) groundtruth_labels = tf.constant([[1.0, 0.0]]) predicted_labels = tf.constant([[0.0, 1000.0], [1000.0, 0.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) boxes1.add_field(fields.BoxListFields.classes, groundtruth_labels) boxes2.add_field(fields.BoxListFields.classes, predicted_labels) detr_similarity_calculator = \ region_similarity_calculator.DETRSimilarity() detr_similarity = detr_similarity_calculator.compare( boxes1, boxes2, None) return detr_similarity exp_output = [[0.0, -20 - 8.0/3.0 + 1000.0]] sim_output = self.execute(graph_fn, []) self.assertAllClose(sim_output, exp_output) if __name__ == '__main__': tf.test.main()
5,420
44.940678
80
py
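The similarity-calculator tests above expect, for example, an IoU of 2/16 between [4, 3, 7, 5] and [3, 4, 6, 8]. A plain-Python IoU helper reproducing that number, for illustration only (the helper name is invented here; boxes are in [y_min, x_min, y_max, x_max] order as in the tests):

def iou(box_a, box_b):
  # Intersection rectangle, clamped to zero when the boxes do not overlap.
  y_min = max(box_a[0], box_b[0])
  x_min = max(box_a[1], box_b[1])
  y_max = min(box_a[2], box_b[2])
  x_max = min(box_a[3], box_b[3])
  intersection = max(0.0, y_max - y_min) * max(0.0, x_max - x_min)
  area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
  area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
  return intersection / (area_a + area_b - intersection)

print(iou([4., 3., 7., 5.], [3., 4., 6., 8.]))  # 0.125 == 2/16, as in the test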
models
models-master/research/object_detection/core/multiclass_nms_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow_models.object_detection.core.post_processing.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import post_processing from object_detection.core import standard_fields as fields from object_detection.utils import test_case class MulticlassNonMaxSuppressionTest(test_case.TestCase): def test_multiclass_nms_select_with_shared_boxes_cpu_only(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) def graph_fn(boxes, scores): score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 nms, _ = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size) return (nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes)) exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] (nms_corners_output, nms_scores_output, nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) self.assertAllClose(nms_corners_output, exp_nms_corners) self.assertAllClose(nms_scores_output, exp_nms_scores) self.assertAllClose(nms_classes_output, exp_nms_classes) def test_multiclass_nms_select_with_shared_boxes_pad_to_max_output_size(self): boxes = np.array([[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_size_per_class = 4 max_output_size = 5 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] def graph_fn(boxes, scores): nms, num_valid_nms_boxes = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size=max_output_size, pad_to_max_output_size=True) return [nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), num_valid_nms_boxes] [nms_corners_output, nms_scores_output, nms_classes_output, num_valid_nms_boxes] = self.execute(graph_fn, [boxes, scores]) self.assertEqual(num_valid_nms_boxes, 4) self.assertAllClose(nms_corners_output[0:num_valid_nms_boxes], exp_nms_corners) self.assertAllClose(nms_scores_output[0:num_valid_nms_boxes], exp_nms_scores) self.assertAllClose(nms_classes_output[0:num_valid_nms_boxes], exp_nms_classes) def 
test_multiclass_nms_select_with_shared_boxes_given_keypoints(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) num_keypoints = 6 keypoints = np.tile(np.reshape(range(8), [8, 1, 1]), [1, num_keypoints, 2]).astype(np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 def graph_fn(boxes, scores, keypoints): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True, additional_fields={fields.BoxListFields.keypoints: keypoints}) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms.get_field(fields.BoxListFields.keypoints), nms_valid ] exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] exp_nms_keypoints = np.tile( np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), [1, num_keypoints, 2]) (nms_corners_output, nms_scores_output, nms_classes_output, nms_keypoints, nms_valid) = self.execute(graph_fn, [boxes, scores, keypoints]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) self.assertAllEqual(nms_keypoints[:nms_valid], exp_nms_keypoints) def test_multiclass_nms_with_shared_boxes_given_keypoint_heatmaps(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) num_boxes = boxes.shape[0] heatmap_height = 5 heatmap_width = 5 num_keypoints = 17 keypoint_heatmaps = np.ones( [num_boxes, heatmap_height, heatmap_width, num_keypoints], dtype=np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] exp_nms_keypoint_heatmaps = np.ones( (4, heatmap_height, heatmap_width, num_keypoints), dtype=np.float32) def graph_fn(boxes, scores, keypoint_heatmaps): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True, additional_fields={ fields.BoxListFields.keypoint_heatmaps: keypoint_heatmaps }) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms.get_field(fields.BoxListFields.keypoint_heatmaps), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_keypoint_heatmaps, nms_valid) = self.execute(graph_fn, [boxes, scores, keypoint_heatmaps]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) self.assertAllEqual(nms_keypoint_heatmaps[:nms_valid], exp_nms_keypoint_heatmaps) def test_multiclass_nms_with_additional_fields(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 
1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) coarse_boxes_key = 'coarse_boxes' coarse_boxes = np.array( [[0.1, 0.1, 1.1, 1.1], [0.1, 0.2, 1.1, 1.2], [0.1, -0.2, 1.1, 1.0], [0.1, 10.1, 1.1, 11.1], [0.1, 10.2, 1.1, 11.2], [ 0.1, 100.1, 1.1, 101.1 ], [0.1, 1000.1, 1.1, 1002.1], [0.1, 1000.1, 1.1, 1002.2]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]], dtype=np.float32) exp_nms_coarse_corners = np.array([[0.1, 10.1, 1.1, 11.1], [0.1, 0.1, 1.1, 1.1], [0.1, 1000.1, 1.1, 1002.1], [0.1, 100.1, 1.1, 101.1]], dtype=np.float32) exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] def graph_fn(boxes, scores, coarse_boxes): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True, additional_fields={coarse_boxes_key: coarse_boxes}) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms.get_field(coarse_boxes_key), nms_valid, ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_coarse_corners, nms_valid) = self.execute(graph_fn, [boxes, scores, coarse_boxes]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) self.assertAllEqual(nms_coarse_corners[:nms_valid], exp_nms_coarse_corners) def test_multiclass_nms_select_with_shared_boxes_given_masks(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) num_classes = 2 mask_height = 3 mask_width = 3 masks = np.tile( np.reshape(range(8), [8, 1, 1, 1]), [1, num_classes, mask_height, mask_width]) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] exp_nms_masks_tensor = np.tile( np.reshape(np.array([3, 0, 6, 5], np.float32), [4, 1, 1]), [1, mask_height, mask_width]) def graph_fn(boxes, scores, masks): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, masks=masks, pad_to_max_output_size=True) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms.get_field(fields.BoxListFields.masks), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_masks, nms_valid) = self.execute(graph_fn, [boxes, scores, masks]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) self.assertAllEqual(nms_masks[:nms_valid], exp_nms_masks_tensor) def test_multiclass_nms_select_with_clip_window(self): boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) scores = np.array([[.9], [.75]], 
np.float32) clip_window = np.array([5, 4, 8, 7], np.float32) score_thresh = 0.0 iou_thresh = 0.5 max_output_size = 100 exp_nms_corners = [[5, 4, 8, 7]] exp_nms_scores = [.9] exp_nms_classes = [0] def graph_fn(boxes, scores, clip_window): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True, clip_window=clip_window) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) def test_multiclass_nms_select_with_clip_window_change_coordinate_frame(self): boxes = np.array([[[0, 0, 10, 10]], [[1, 1, 11, 11]]], np.float32) scores = np.array([[.9], [.75]], np.float32) clip_window = np.array([5, 4, 8, 7], np.float32) score_thresh = 0.0 iou_thresh = 0.5 max_output_size = 100 exp_nms_corners = [[0, 0, 1, 1]] exp_nms_scores = [.9] exp_nms_classes = [0] def graph_fn(boxes, scores, clip_window): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, clip_window=clip_window, pad_to_max_output_size=True, change_coordinate_frame=True) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores, clip_window]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) def test_multiclass_nms_select_with_per_class_cap(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_size_per_class = 2 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002]] exp_nms_scores = [.95, .9, .85] exp_nms_classes = [0, 0, 1] def graph_fn(boxes, scores): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class, pad_to_max_output_size=True) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) def test_multiclass_nms_select_with_total_cap(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) score_thresh = 0.1 iou_thresh = .5 
max_size_per_class = 4 max_total_size = 2 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1]] exp_nms_scores = [.95, .9] exp_nms_classes = [0, 0] def graph_fn(boxes, scores): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class, max_total_size, pad_to_max_output_size=True) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) def test_multiclass_nms_threshold_then_select_with_shared_boxes(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9], [.75], [.6], [.95], [.5], [.3], [.01], [.01]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 3 exp_nms = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]] def graph_fn(boxes, scores): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True) return nms.get(), nms_valid nms_output, nms_valid = self.execute(graph_fn, [boxes, scores]) self.assertAllClose(nms_output[:nms_valid], exp_nms) def test_multiclass_nms_select_with_separate_boxes(self): boxes = np.array( [[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [ 0, 10, 1, 11 ]], [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 999, 2, 1004], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] def graph_fn(boxes, scores): nms, nms_valid = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_output_size, pad_to_max_output_size=True) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes), nms_valid ] (nms_corners_output, nms_scores_output, nms_classes_output, nms_valid) = self.execute(graph_fn, [boxes, scores]) self.assertAllClose(nms_corners_output[:nms_valid], exp_nms_corners) self.assertAllClose(nms_scores_output[:nms_valid], exp_nms_scores) self.assertAllClose(nms_classes_output[:nms_valid], exp_nms_classes) def test_multiclass_soft_nms_select_with_shared_boxes_cpu_only(self): boxes = np.array( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], np.float32) scores = np.array([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]], np.float32) score_thresh = 0.1 iou_thresh = 1.0 max_output_size = 4 exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 0.1, 1, 1.1]] exp_nms_scores = [.95, .9, .85, .384] exp_nms_classes = [0, 0, 1, 0] def graph_fn(boxes, 
scores): nms, _ = post_processing.multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, soft_nms_sigma=0.5) return [ nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes) ] (nms_corners_output, nms_scores_output, nms_classes_output) = self.execute_cpu(graph_fn, [boxes, scores]) self.assertAllClose( nms_corners_output, exp_nms_corners, rtol=1e-2, atol=1e-2) self.assertAllClose(nms_scores_output, exp_nms_scores, rtol=1e-2, atol=1e-2) self.assertAllClose( nms_classes_output, exp_nms_classes, rtol=1e-2, atol=1e-2) if __name__ == '__main__': tf.test.main()
22,866
38.155822
80
py
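The NMS tests above exercise multiclass_non_max_suppression with score and IoU thresholds. For intuition only, here is a minimal single-class greedy NMS sketch in NumPy (greedy_nms and _iou are names invented for this example; it omits the score threshold, per-class caps, clip windows, soft-NMS and padding handled by the real op):

import numpy as np

def _iou(a, b):
  y_min, x_min = max(a[0], b[0]), max(a[1], b[1])
  y_max, x_max = min(a[2], b[2]), min(a[3], b[3])
  inter = max(0.0, y_max - y_min) * max(0.0, x_max - x_min)
  return inter / ((a[2] - a[0]) * (a[3] - a[1]) +
                  (b[2] - b[0]) * (b[3] - b[1]) - inter)

def greedy_nms(boxes, scores, iou_thresh, max_output_size):
  # Repeatedly keep the highest-scoring box and drop remaining boxes that
  # overlap it by more than iou_thresh.
  order = np.argsort(-scores)
  keep = []
  while order.size and len(keep) < max_output_size:
    best, rest = order[0], order[1:]
    keep.append(int(best))
    overlaps = np.array([_iou(boxes[best], boxes[j]) for j in rest])
    order = rest[overlaps <= iou_thresh]
  return keep

boxes = np.array([[0., 0., 1., 1.], [0., 0.1, 1., 1.1], [0., 10., 1., 11.]])
scores = np.array([0.9, 0.75, 0.95])
print(greedy_nms(boxes, scores, iou_thresh=0.5, max_output_size=4))  # [2, 0]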
models
models-master/research/object_detection/core/keypoint_ops_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.keypoint_ops.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import keypoint_ops from object_detection.utils import test_case class KeypointOpsTest(test_case.TestCase): """Tests for common keypoint operations.""" def test_scale(self): def graph_fn(): keypoints = tf.constant([ [[0.0, 0.0], [100.0, 200.0]], [[50.0, 120.0], [100.0, 140.0]] ]) y_scale = tf.constant(1.0 / 100) x_scale = tf.constant(1.0 / 200) expected_keypoints = tf.constant([ [[0., 0.], [1.0, 1.0]], [[0.5, 0.6], [1.0, 0.7]] ]) output = keypoint_ops.scale(keypoints, y_scale, x_scale) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_clip_to_window(self): def graph_fn(): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.25], [0.75, 0.75]] ]) output = keypoint_ops.clip_to_window(keypoints, window) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_prune_outside_window(self): def graph_fn(): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]], [[np.nan, np.nan], [np.nan, np.nan]]]) output = keypoint_ops.prune_outside_window(keypoints, window) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_change_coordinate_frame(self): def graph_fn(): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0, 0.5], [1.0, 1.0]], [[0.5, -0.5], [1.5, 1.5]] ]) output = keypoint_ops.change_coordinate_frame(keypoints, window) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_keypoints_to_enclosing_bounding_boxes(self): def graph_fn(): keypoints = tf.constant( [ [ # Instance 0. [5., 10.], [3., 20.], [8., 4.], ], [ # Instance 1. [2., 12.], [0., 3.], [5., 19.], ], ], dtype=tf.float32) bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(keypoints) return bboxes output = self.execute(graph_fn, []) expected_bboxes = np.array( [ [3., 4., 8., 20.], [0., 3., 5., 19.] ]) self.assertAllClose(expected_bboxes, output) def test_keypoints_to_enclosing_bounding_boxes_axis2(self): def graph_fn(): keypoints = tf.constant( [ [ # Instance 0. [5., 10.], [3., 20.], [8., 4.], ], [ # Instance 1. 
[2., 12.], [0., 3.], [5., 19.], ], ], dtype=tf.float32) keypoints = tf.stack([keypoints, keypoints], axis=0) bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes( keypoints, keypoints_axis=2) return bboxes output = self.execute(graph_fn, []) expected_bboxes = np.array( [ [3., 4., 8., 20.], [0., 3., 5., 19.] ]) self.assertAllClose(expected_bboxes, output[0]) self.assertAllClose(expected_bboxes, output[1]) def test_to_normalized_coordinates(self): def graph_fn(): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_to_normalized_coordinates_already_normalized(self): if self.has_tpu(): return def graph_fn(): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) return output with self.assertRaisesOpError('assertion failed'): self.execute_cpu(graph_fn, []) def test_to_absolute_coordinates(self): def graph_fn(): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_to_absolute_coordinates_already_absolute(self): if self.has_tpu(): return def graph_fn(): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) return output with self.assertRaisesOpError('assertion failed'): self.execute_cpu(graph_fn, []) def test_flip_horizontal(self): def graph_fn(): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) expected_keypoints = tf.constant([ [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]], [[0.4, 0.6], [0.5, 0.5], [0.6, 0.4]], ]) output = keypoint_ops.flip_horizontal(keypoints, 0.5) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_flip_horizontal_permutation(self): def graph_fn(): keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]], [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]], ]) output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_flip_vertical(self): def graph_fn(): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) expected_keypoints = tf.constant([ [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], [[0.6, 0.4], [0.5, 0.5], [0.4, 0.6]], ]) output = keypoint_ops.flip_vertical(keypoints, 0.5) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_flip_vertical_permutation(self): def graph_fn(): keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 
0.6]]]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]], ]) output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_rot90(self): def graph_fn(): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]] ]) expected_keypoints = tf.constant([ [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]], ]) output = keypoint_ops.rot90(keypoints) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_rot90_permutation(self): def graph_fn(): keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]]) rot_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], [[0.4, 0.4], [0.3, 0.6], [0.4, 0.5]], ]) output = keypoint_ops.rot90(keypoints, rotation_permutation=rot_permutation) return output, expected_keypoints output, expected_keypoints = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoints) def test_keypoint_weights_from_visibilities(self): def graph_fn(): keypoint_visibilities = tf.constant([ [True, True, False], [False, True, False] ]) per_keypoint_weights = [1.0, 2.0, 3.0] keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( keypoint_visibilities, per_keypoint_weights) return keypoint_weights expected_keypoint_weights = [ [1.0, 2.0, 0.0], [0.0, 2.0, 0.0] ] output = self.execute(graph_fn, []) self.assertAllClose(output, expected_keypoint_weights) def test_keypoint_weights_from_visibilities_no_per_kpt_weights(self): def graph_fn(): keypoint_visibilities = tf.constant([ [True, True, False], [False, True, False] ]) keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities( keypoint_visibilities) return keypoint_weights expected_keypoint_weights = [ [1.0, 1.0, 0.0], [0.0, 1.0, 0.0] ] output = self.execute(graph_fn, []) self.assertAllClose(expected_keypoint_weights, output) def test_set_keypoint_visibilities_no_initial_kpt_vis(self): keypoints_np = np.array( [ [[np.nan, 0.2], [np.nan, np.nan], [-3., 7.]], [[0.5, 0.2], [4., 1.0], [-3., np.nan]], ], dtype=np.float32) def graph_fn(): keypoints = tf.constant(keypoints_np, dtype=tf.float32) keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( keypoints) return keypoint_visibilities expected_kpt_vis = [ [False, False, True], [True, True, False] ] output = self.execute(graph_fn, []) self.assertAllEqual(expected_kpt_vis, output) def test_set_keypoint_visibilities(self): keypoints_np = np.array( [ [[np.nan, 0.2], [np.nan, np.nan], [-3., 7.]], [[0.5, 0.2], [4., 1.0], [-3., np.nan]], ], dtype=np.float32) initial_keypoint_visibilities_np = np.array( [ [False, True, # Will be overriden by NaN coords. False], # Will be maintained, even though non-NaN coords. [True, False, # Will be maintained, even though non-NaN coords. 
False] ]) def graph_fn(): keypoints = tf.constant(keypoints_np, dtype=tf.float32) initial_keypoint_visibilities = tf.constant( initial_keypoint_visibilities_np, dtype=tf.bool) keypoint_visibilities = keypoint_ops.set_keypoint_visibilities( keypoints, initial_keypoint_visibilities) return keypoint_visibilities expected_kpt_vis = [ [False, False, False], [True, False, False] ] output = self.execute(graph_fn, []) self.assertAllEqual(expected_kpt_vis, output) if __name__ == '__main__': tf.test.main()
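# Illustrative usage of the ops exercised by the tests above -- a minimal
# sketch that reuses this file's imports; the values are hypothetical and only
# mirror the [num_instances, num_keypoints, 2] convention used in these tests.
example_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],
                                 [[0.5, 0.0], [1.0, 1.0]]], dtype=tf.float32)
example_window = tf.constant([0.25, 0.25, 0.75, 0.75])
# Clip keypoints to a window, convert to absolute coordinates for a 40x60
# image, and derive the tightest enclosing box per instance.
clipped = keypoint_ops.clip_to_window(example_keypoints, example_window)
absolute = keypoint_ops.to_absolute_coordinates(example_keypoints, 40, 60)
enclosing_boxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(absolute)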
13,269
32.510101
80
py
models
models-master/research/object_detection/core/target_assigner.py
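A minimal usage sketch for the assigner defined in this file. The anchors, boxes, and labels below are hypothetical toy values; the sketch assumes the object_detection package is importable and follows the create_target_assigner factory described in the module.

import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import target_assigner

# Three anchors and two groundtruth boxes in normalized
# [y_min, x_min, y_max, x_max] coordinates (hypothetical values).
anchors = box_list.BoxList(tf.constant(
    [[0.0, 0.0, 0.5, 0.5],
     [0.5, 0.5, 1.0, 1.0],
     [0.0, 0.5, 0.5, 1.0]], tf.float32))
gt_boxes = box_list.BoxList(tf.constant(
    [[0.05, 0.05, 0.45, 0.45],
     [0.5, 0.5, 0.95, 0.95]], tf.float32))
# One-hot labels over (background + 2 classes).
gt_labels = tf.constant([[0., 1., 0.],
                         [0., 0., 1.]], tf.float32)

assigner = target_assigner.create_target_assigner('FasterRCNN', 'detection')
(cls_targets, cls_weights, reg_targets, reg_weights,
 match_results) = assigner.assign(
     anchors, gt_boxes, gt_labels,
     unmatched_class_label=tf.constant([1., 0., 0.], tf.float32))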
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base target assigner module. The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and groundtruth detections (bounding boxes), to assign classification and regression targets to each anchor as well as weights to each anchor (specifying, e.g., which anchors should not contribute to training loss). It assigns classification/regression targets by performing the following steps: 1) Computing pairwise similarity between anchors and groundtruth boxes using a provided RegionSimilarity Calculator 2) Computing a matching based on the similarity matrix using a provided Matcher 3) Assigning regression targets based on the matching and a provided BoxCoder 4) Assigning classification targets based on the matching and groundtruth labels Note that TargetAssigners only operate on detections from a single image at a time, so any logic for applying a TargetAssigner to multiple images must be handled externally. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.core import box_coder from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import densepose_ops from object_detection.core import keypoint_ops from object_detection.core import matcher as mat from object_detection.core import region_similarity_calculator as sim_calc from object_detection.core import standard_fields as fields from object_detection.matchers import argmax_matcher from object_detection.matchers import hungarian_matcher from object_detection.utils import shape_utils from object_detection.utils import target_assigner_utils as ta_utils from object_detection.utils import tf_version if tf_version.is_tf1(): from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top ResizeMethod = tf2.image.ResizeMethod _DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0 class TargetAssigner(object): """Target assigner to compute classification and regression targets.""" def __init__(self, similarity_calc, matcher, box_coder_instance, negative_class_weight=1.0): """Construct Object Detection Target Assigner. Args: similarity_calc: a RegionSimilarityCalculator matcher: an object_detection.core.Matcher used to match groundtruth to anchors. box_coder_instance: an object_detection.core.BoxCoder used to encode matching groundtruth boxes with respect to anchors. negative_class_weight: classification weight to be associated to negative anchors (default: 1.0). The weight must be in [0., 1.]. 
Raises: ValueError: if similarity_calc is not a RegionSimilarityCalculator or if matcher is not a Matcher or if box_coder is not a BoxCoder """ if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator): raise ValueError('similarity_calc must be a RegionSimilarityCalculator') if not isinstance(matcher, mat.Matcher): raise ValueError('matcher must be a Matcher') if not isinstance(box_coder_instance, box_coder.BoxCoder): raise ValueError('box_coder must be a BoxCoder') self._similarity_calc = similarity_calc self._matcher = matcher self._box_coder = box_coder_instance self._negative_class_weight = negative_class_weight @property def box_coder(self): return self._box_coder # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields. def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, unmatched_class_label=None, groundtruth_weights=None): """Assign classification and regression targets to each anchor. For a given set of anchors and groundtruth detections, match anchors to groundtruth_boxes and assign classification and regression targets to each anchor as well as weights based on the resulting match (specifying, e.g., which anchors should not contribute to training loss). Anchors that are not matched to anything are given a classification target of self._unmatched_cls_target which can be specified via the constructor. Args: anchors: a BoxList representing N anchors groundtruth_boxes: a BoxList representing M groundtruth boxes groundtruth_labels: a tensor of shape [M, d_1, ... d_k] with labels for each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set to None, groundtruth_labels assumes a binary problem where all ground_truth boxes get a positive label (of 1). unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). If set to None, unmatched_cls_target is set to be [0] for each anchor. groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. The weights must be in [0., 1.]. If None, all weights are set to 1. Generally no groundtruth boxes with zero weight match to any anchors as matchers are aware of groundtruth weights. Additionally, `cls_weights` and `reg_weights` are calculated using groundtruth weights as an added safety. Returns: cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has shape [num_gt_boxes, d_1, d_2, ... d_k]. cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], representing weights for each element in cls_targets. reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension] reg_weights: a float32 tensor with shape [num_anchors] match: an int32 tensor of shape [num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[i] >= 0, anchor i is matched with groundtruth match[i]. (2) if match[i]=-1, anchor i is marked to be background . (3) if match[i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. 
Raises: ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList """ if not isinstance(anchors, box_list.BoxList): raise ValueError('anchors must be an BoxList') if not isinstance(groundtruth_boxes, box_list.BoxList): raise ValueError('groundtruth_boxes must be an BoxList') if unmatched_class_label is None: unmatched_class_label = tf.constant([0], tf.float32) if groundtruth_labels is None: groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0)) groundtruth_labels = tf.expand_dims(groundtruth_labels, -1) unmatched_shape_assert = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:], shape_utils.combined_static_and_dynamic_shape(unmatched_class_label)) labels_and_box_shapes_assert = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape( groundtruth_labels)[:1], shape_utils.combined_static_and_dynamic_shape( groundtruth_boxes.get())[:1]) if groundtruth_weights is None: num_gt_boxes = groundtruth_boxes.num_boxes_static() if not num_gt_boxes: num_gt_boxes = groundtruth_boxes.num_boxes() groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32) # set scores on the gt boxes scores = 1 - groundtruth_labels[:, 0] groundtruth_boxes.add_field(fields.BoxListFields.scores, scores) with tf.control_dependencies( [unmatched_shape_assert, labels_and_box_shapes_assert]): match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors) match = self._matcher.match(match_quality_matrix, valid_rows=tf.greater(groundtruth_weights, 0)) reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match) cls_targets = self._create_classification_targets(groundtruth_labels, unmatched_class_label, match) reg_weights = self._create_regression_weights(match, groundtruth_weights) cls_weights = self._create_classification_weights(match, groundtruth_weights) # convert cls_weights from per-anchor to per-class. class_label_shape = tf.shape(cls_targets)[1:] weights_shape = tf.shape(cls_weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), class_label_shape], axis=0) for _ in range(len(cls_targets.get_shape()[1:])): cls_weights = tf.expand_dims(cls_weights, -1) cls_weights = tf.tile(cls_weights, weights_multiple) num_anchors = anchors.num_boxes_static() if num_anchors is not None: reg_targets = self._reset_target_shape(reg_targets, num_anchors) cls_targets = self._reset_target_shape(cls_targets, num_anchors) reg_weights = self._reset_target_shape(reg_weights, num_anchors) cls_weights = self._reset_target_shape(cls_weights, num_anchors) return (cls_targets, cls_weights, reg_targets, reg_weights, match.match_results) def _reset_target_shape(self, target, num_anchors): """Sets the static shape of the target. Args: target: the target tensor. Its first dimension will be overwritten. num_anchors: the number of anchors, which is used to override the target's first dimension. Returns: A tensor with the shape info filled in. """ target_shape = target.get_shape().as_list() target_shape[0] = num_anchors target.set_shape(target_shape) return target def _create_regression_targets(self, anchors, groundtruth_boxes, match): """Returns a regression target for each anchor. 
Args: anchors: a BoxList representing N anchors groundtruth_boxes: a BoxList representing M groundtruth_boxes match: a matcher.Match object Returns: reg_targets: a float32 tensor with shape [N, box_code_dimension] """ matched_gt_boxes = match.gather_based_on_match( groundtruth_boxes.get(), unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) if groundtruth_boxes.has_field(fields.BoxListFields.keypoints): groundtruth_keypoints = groundtruth_boxes.get_field( fields.BoxListFields.keypoints) matched_keypoints = match.gather_based_on_match( groundtruth_keypoints, unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]), ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])) matched_gt_boxlist.add_field(fields.BoxListFields.keypoints, matched_keypoints) matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors) match_results_shape = shape_utils.combined_static_and_dynamic_shape( match.match_results) # Zero out the unmatched and ignored regression targets. unmatched_ignored_reg_targets = tf.tile( self._default_regression_target(), [match_results_shape[0], 1]) matched_anchors_mask = match.matched_column_indicator() reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets) return reg_targets def _default_regression_target(self): """Returns the default target for anchors to regress to. Default regression targets are set to zero (though in this implementation what these targets are set to should not matter as the regression weight of any box set to regress to the default target is zero). Returns: default_target: a float32 tensor with shape [1, box_code_dimension] """ return tf.constant([self._box_coder.code_size*[0]], tf.float32) def _create_classification_targets(self, groundtruth_labels, unmatched_class_label, match): """Create classification targets for each anchor. Assign a classification target of for each anchor to the matching groundtruth label that is provided by match. Anchors that are not matched to anything are given the target self._unmatched_cls_target Args: groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k] with labels for each of the ground_truth boxes. The subshape [d_1, ... d_k] can be empty (corresponding to scalar labels). unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. Returns: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has shape [num_gt_boxes, d_1, d_2, ... d_k]. """ return match.gather_based_on_match( groundtruth_labels, unmatched_value=unmatched_class_label, ignored_value=unmatched_class_label) def _create_regression_weights(self, match, groundtruth_weights): """Set regression weight for each anchor. Only positive anchors are set to contribute to the regression loss, so this method returns a weight of 1 for every positive anchor and 0 for every negative anchor. Args: match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. 
groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. Returns: a float32 tensor with shape [num_anchors] representing regression weights. """ return match.gather_based_on_match( groundtruth_weights, ignored_value=0., unmatched_value=0.) def _create_classification_weights(self, match, groundtruth_weights): """Create classification weights for each anchor. Positive (matched) anchors are associated with a weight of positive_class_weight and negative (unmatched) anchors are associated with a weight of negative_class_weight. When anchors are ignored, weights are set to zero. By default, both positive/negative weights are set to 1.0, but they can be adjusted to handle class imbalance (which is almost always the case in object detection). Args: match: a matcher.Match object that provides a matching between anchors and groundtruth boxes. groundtruth_weights: a float tensor of shape [M] indicating the weight to assign to all anchors match to a particular groundtruth box. Returns: a float32 tensor with shape [num_anchors] representing classification weights. """ return match.gather_based_on_match( groundtruth_weights, ignored_value=0., unmatched_value=self._negative_class_weight) def get_box_coder(self): """Get BoxCoder of this TargetAssigner. Returns: BoxCoder object. """ return self._box_coder # TODO(rathodv): This method pulls in all the implementation dependencies into # core. Therefore its best to have this factory method outside of core. def create_target_assigner(reference, stage=None, negative_class_weight=1.0, use_matmul_gather=False): """Factory function for creating standard target assigners. Args: reference: string referencing the type of TargetAssigner. stage: string denoting stage: {proposal, detection}. negative_class_weight: classification weight to be associated to negative anchors (default: 1.0) use_matmul_gather: whether to use matrix multiplication based gather which are better suited for TPUs. Returns: TargetAssigner: desired target assigner. Raises: ValueError: if combination reference+stage is invalid. """ if reference == 'Multibox' and stage == 'proposal': if tf_version.is_tf2(): raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.') similarity_calc = sim_calc.NegSqDistSimilarity() matcher = bipartite_matcher.GreedyBipartiteMatcher() box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder() elif reference == 'FasterRCNN' and stage == 'proposal': similarity_calc = sim_calc.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7, unmatched_threshold=0.3, force_match_for_each_row=True, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=[10.0, 10.0, 5.0, 5.0]) elif reference == 'FasterRCNN' and stage == 'detection': similarity_calc = sim_calc.IouSimilarity() # Uses all proposals with IOU < 0.5 as candidate negatives. 
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, negatives_lower_than_unmatched=True, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder( scale_factors=[10.0, 10.0, 5.0, 5.0]) elif reference == 'FastRCNN': similarity_calc = sim_calc.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.1, force_match_for_each_row=False, negatives_lower_than_unmatched=False, use_matmul_gather=use_matmul_gather) box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder() else: raise ValueError('No valid combination of reference and stage.') return TargetAssigner(similarity_calc, matcher, box_coder_instance, negative_class_weight=negative_class_weight) def batch_assign(target_assigner, anchors_batch, gt_box_batch, gt_class_targets_batch, unmatched_class_label=None, gt_weights_batch=None): """Batched assignment of classification and regression targets. Args: target_assigner: a target assigner. anchors_batch: BoxList representing N box anchors or list of BoxList objects with length batch_size representing anchor sets. gt_box_batch: a list of BoxList objects with length batch_size representing groundtruth boxes for each image in the batch gt_class_targets_batch: a list of tensors with length batch_size, where each tensor has shape [num_gt_boxes_i, classification_target_size] and num_gt_boxes_i is the number of boxes in the ith boxlist of gt_box_batch. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. Raises: ValueError: if input list lengths are inconsistent, i.e., batch_size == len(gt_box_batch) == len(gt_class_targets_batch) and batch_size == len(anchors_batch) unless anchors_batch is a single BoxList. 
""" if not isinstance(anchors_batch, list): anchors_batch = len(gt_box_batch) * [anchors_batch] if not all( isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') if not (len(anchors_batch) == len(gt_box_batch) == len(gt_class_targets_batch)): raise ValueError('batch size incompatible with lengths of anchors_batch, ' 'gt_box_batch and gt_class_targets_batch.') cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] match_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_targets_batch) for anchors, gt_boxes, gt_class_targets, gt_weights in zip( anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch): (cls_targets, cls_weights, reg_targets, reg_weights, match) = target_assigner.assign( anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights) cls_targets_list.append(cls_targets) cls_weights_list.append(cls_weights) reg_targets_list.append(reg_targets) reg_weights_list.append(reg_weights) match_list.append(match) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) batch_match = tf.stack(match_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) # Assign an alias to avoid large refactor of existing users. batch_assign_targets = batch_assign def batch_get_targets(batch_match, groundtruth_tensor_list, groundtruth_weights_list, unmatched_value, unmatched_weight): """Returns targets based on anchor-groundtruth box matching results. Args: batch_match: An int32 tensor of shape [batch, num_anchors] containing the result of target assignment returned by TargetAssigner.assign(..). groundtruth_tensor_list: A list of groundtruth tensors of shape [num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type. groundtruth_weights_list: A list of weights, one per groundtruth tensor, of shape [num_groundtruth]. unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as groundtruth tensor containing target value for anchors that remain unmatched. unmatched_weight: Scalar weight to assign to anchors that remain unmatched. Returns: targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k] containing targets for anchors. weights: A float tensor of shape [batch, num_anchors] containing the weights to assign to each target. """ match_list = tf.unstack(batch_match) targets_list = [] weights_list = [] for match_tensor, groundtruth_tensor, groundtruth_weight in zip( match_list, groundtruth_tensor_list, groundtruth_weights_list): match_object = mat.Match(match_tensor) targets = match_object.gather_based_on_match( groundtruth_tensor, unmatched_value=unmatched_value, ignored_value=unmatched_value) targets_list.append(targets) weights = match_object.gather_based_on_match( groundtruth_weight, unmatched_value=unmatched_weight, ignored_value=tf.zeros_like(unmatched_weight)) weights_list.append(weights) return tf.stack(targets_list), tf.stack(weights_list) def batch_assign_confidences(target_assigner, anchors_batch, gt_box_batch, gt_class_confidences_batch, gt_weights_batch=None, unmatched_class_label=None, include_background_class=True, implicit_class_weight=1.0): """Batched assignment of classification and regression targets. 
This differences between batch_assign_confidences and batch_assign_targets: - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and tensor (high-dimensional) targets. 'batch_assign_confidences' only support scalar (agnostic) and vector (multiclass) targets. - 'batch_assign_targets' assumes the input class tensor using the binary one/K-hot encoding. 'batch_assign_confidences' takes the class confidence scores as the input, where 1 means positive classes, 0 means implicit negative classes, and -1 means explicit negative classes. - 'batch_assign_confidences' assigns the targets in the similar way as 'batch_assign_targets' except that it gives different weights for implicit and explicit classes. This allows user to control the negative gradients pushed differently for implicit and explicit examples during the training. Args: target_assigner: a target assigner. anchors_batch: BoxList representing N box anchors or list of BoxList objects with length batch_size representing anchor sets. gt_box_batch: a list of BoxList objects with length batch_size representing groundtruth boxes for each image in the batch gt_class_confidences_batch: a list of tensors with length batch_size, where each tensor has shape [num_gt_boxes_i, classification_target_size] and num_gt_boxes_i is the number of boxes in the ith boxlist of gt_box_batch. Note that in this tensor, 1 means explicit positive class, -1 means explicit negative class, and 0 means implicit negative class. gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_gt_boxes_i] containing weights for groundtruth boxes. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). include_background_class: whether or not gt_class_confidences_batch includes the background class. implicit_class_weight: the weight assigned to implicit examples. Returns: batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_anchors, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_anchors], match: an int32 tensor of shape [batch_size, num_anchors] containing result of anchor groundtruth matching. Each position in the tensor indicates an anchor and holds the following meaning: (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i]. (2) if match[x, i]=-1, anchor i is marked to be background . (3) if match[x, i]=-2, anchor i is ignored since it is not background and does not have sufficient overlap to call it a foreground. Raises: ValueError: if input list lengths are inconsistent, i.e., batch_size == len(gt_box_batch) == len(gt_class_targets_batch) and batch_size == len(anchors_batch) unless anchors_batch is a single BoxList, or if any element in gt_class_confidences_batch has rank > 2. 
""" if not isinstance(anchors_batch, list): anchors_batch = len(gt_box_batch) * [anchors_batch] if not all( isinstance(anchors, box_list.BoxList) for anchors in anchors_batch): raise ValueError('anchors_batch must be a BoxList or list of BoxLists.') if not (len(anchors_batch) == len(gt_box_batch) == len(gt_class_confidences_batch)): raise ValueError('batch size incompatible with lengths of anchors_batch, ' 'gt_box_batch and gt_class_confidences_batch.') cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] match_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_confidences_batch) for anchors, gt_boxes, gt_class_confidences, gt_weights in zip( anchors_batch, gt_box_batch, gt_class_confidences_batch, gt_weights_batch): if (gt_class_confidences is not None and len(gt_class_confidences.get_shape().as_list()) > 2): raise ValueError('The shape of the class target is not supported. ', gt_class_confidences.get_shape()) cls_targets, _, reg_targets, _, match = target_assigner.assign( anchors, gt_boxes, gt_class_confidences, unmatched_class_label, groundtruth_weights=gt_weights) if include_background_class: cls_targets_without_background = tf.slice( cls_targets, [0, 1], [-1, -1]) else: cls_targets_without_background = cls_targets positive_mask = tf.greater(cls_targets_without_background, 0.0) negative_mask = tf.less(cls_targets_without_background, 0.0) explicit_example_mask = tf.logical_or(positive_mask, negative_mask) positive_anchors = tf.reduce_any(positive_mask, axis=-1) regression_weights = tf.cast(positive_anchors, dtype=tf.float32) regression_targets = ( reg_targets * tf.expand_dims(regression_weights, axis=-1)) regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1) cls_targets_without_background = ( cls_targets_without_background * (1 - tf.cast(negative_mask, dtype=tf.float32))) cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast( explicit_example_mask, dtype=tf.float32) + implicit_class_weight) if include_background_class: cls_weights_background = ( (1 - implicit_class_weight) * regression_weights_expanded + implicit_class_weight) classification_weights = tf.concat( [cls_weights_background, cls_weights_without_background], axis=-1) cls_targets_background = 1 - regression_weights_expanded classification_targets = tf.concat( [cls_targets_background, cls_targets_without_background], axis=-1) else: classification_targets = cls_targets_without_background classification_weights = cls_weights_without_background cls_targets_list.append(classification_targets) cls_weights_list.append(classification_weights) reg_targets_list.append(regression_targets) reg_weights_list.append(regression_weights) match_list.append(match) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) batch_match = tf.stack(match_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, batch_match) def _smallest_positive_root(a, b, c): """Returns the smallest positive root of a quadratic equation.""" discriminant = tf.sqrt(b ** 2 - 4 * a * c) # TODO(vighneshb) We are currently using the slightly incorrect # CenterNet implementation. The commented lines implement the fixed version # in https://github.com/princeton-vl/CornerNet. Change the implementation # after verifying it has no negative impact. 
# root1 = (-b - discriminant) / (2 * a) # root2 = (-b + discriminant) / (2 * a) # return tf.where(tf.less(root1, 0), root2, root1) return (-b + discriminant) / (2.0) def max_distance_for_overlap(height, width, min_iou): """Computes how far apart bbox corners can lie while maintaining the iou. Given a bounding box size, this function returns a lower bound on how far apart the corners of another box can lie while still maintaining the given IoU. The implementation is based on the `gaussian_radius` function in the Objects as Points github repo: https://github.com/xingyizhou/CenterNet Args: height: A 1-D float Tensor representing height of the ground truth boxes. width: A 1-D float Tensor representing width of the ground truth boxes. min_iou: A float representing the minimum IoU desired. Returns: distance: A 1-D Tensor of distances, of the same length as the input height and width tensors. """ # Given that the detected box is displaced at a distance `d`, the exact # IoU value will depend on the angle at which each corner is displaced. # We simplify our computation by assuming that each corner is displaced by # a distance `d` in both x and y direction. This gives us a lower IoU than # what is actually realizable and ensures that any box with corners less # than `d` distance apart will always have an IoU greater than or equal # to `min_iou` # The following 3 cases can be worked on geometrically and come down to # solving a quadratic inequality. In each case, to ensure `min_iou` we use # the smallest positive root of the equation. # Case where detected box is offset from ground truth and no box completely # contains the other. distance_detection_offset = _smallest_positive_root( a=1, b=-(height + width), c=width * height * ((1 - min_iou) / (1 + min_iou)) ) # Case where detection is smaller than ground truth and completely contained # in it. distance_detection_in_gt = _smallest_positive_root( a=4, b=-2 * (height + width), c=(1 - min_iou) * width * height ) # Case where ground truth is smaller than detection and completely contained # in it. distance_gt_in_detection = _smallest_positive_root( a=4 * min_iou, b=(2 * min_iou) * (width + height), c=(min_iou - 1) * width * height ) return tf.reduce_min([distance_detection_offset, distance_gt_in_detection, distance_detection_in_gt], axis=0) def get_batch_predictions_from_indices(batch_predictions, indices): """Gets the values of predictions in a batch at the given indices. The indices are expected to come from the offset targets generation functions in this library. The returned value is intended to be used inside a loss function. Args: batch_predictions: A tensor of shape [batch_size, height, width, channels] or [batch_size, height, width, class, channels] for class-specific features (e.g. keypoint joint offsets). indices: A tensor of shape [num_instances, 3] for single class features or [num_instances, 4] for multiple classes features. Returns: values: A tensor of shape [num_instances, channels] holding the predicted values at the given indices. """ # Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on # TPU) than gather with flattened inputs, so reshape the tensor, flatten the # indices, and run gather. 
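  # A small worked illustration of the flattened-gather trick used below
  # (assumed 4-D case): for batch_predictions of shape [B, H, W, C] and an
  # index row [b, y, x], the flat row index is b*H*W + y*W + x, obtained by a
  # matmul with [H*W, W, 1]; the gather then reads from the predictions
  # reshaped to [B*H*W, C].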
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions) # [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1] rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0]) rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0) # Compute flattened indices and gather. flattened_inds = tf.linalg.matmul( indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0] batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]]) return tf.gather(batch_predictions_2d, flattened_inds, axis=0) def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap): """Computes the standard deviation of the Gaussian kernel from box size. Args: boxes_height: A 1D tensor with shape [num_instances] representing the height of each box. boxes_width: A 1D tensor with shape [num_instances] representing the width of each box. min_overlap: The minimum IOU overlap that boxes need to have to not be penalized. Returns: A 1D tensor with shape [num_instances] representing the computed Gaussian sigma for each of the box. """ # We are dividing by 3 so that points closer than the computed # distance have a >99% CDF. sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap) sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0 return sigma def _preprocess_keypoints_and_weights(out_height, out_width, keypoints, class_onehot, class_weights, keypoint_weights, class_id, keypoint_indices): """Preprocesses the keypoints and the corresponding keypoint weights. This function performs several common steps to preprocess the keypoints and keypoint weights features, including: 1) Select the subset of keypoints based on the keypoint indices, fill the keypoint NaN values with zeros and convert to absolute coordinates. 2) Generate the weights of the keypoint using the following information: a. The class of the instance. b. The NaN value of the keypoint coordinates. c. The provided keypoint weights. Args: out_height: An integer or an integer tensor indicating the output height of the model. out_width: An integer or an integer tensor indicating the output width of the model. keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2] representing the original keypoint grountruth coordinates. class_onehot: A float tensor of shape [num_instances, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. class_weights: A float tensor of shape [num_instances] containing weights for groundtruth instances. keypoint_weights: A float tensor of shape [num_instances, num_total_keypoints] representing the weights of each keypoints. class_id: int, the ID of the class (0-indexed) that contains the target keypoints to consider in this task. keypoint_indices: A list of integers representing the indices of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints that should be considered in this task. Returns: A tuple of two tensors: keypoint_absolute: A float tensor of shape [num_instances, num_keypoints, 2] which is the selected and updated keypoint coordinates. keypoint_weights: A float tensor of shape [num_instances, num_keypoints] representing the updated weight of each keypoint. """ # Select the targets keypoints by their type ids and generate the mask # of valid elements. 
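  # For instance (hypothetical values): with keypoint_indices=[0, 2], only
  # keypoint columns 0 and 2 are retained, and valid_mask entries are zeroed
  # for instances whose class does not match class_id as well as for keypoints
  # whose original coordinates are NaN, so those keypoints get zero weight below.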
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class( keypoint_coordinates=keypoints, class_id=class_id, class_onehot=class_onehot, class_weights=class_weights, keypoint_indices=keypoint_indices) # Keypoint coordinates in absolute coordinate system. # The shape of the tensors: [num_instances, num_keypoints, 2]. keypoints_absolute = keypoint_ops.to_absolute_coordinates( keypoints, out_height, out_width) # Assign default weights for the keypoints. if keypoint_weights is None: keypoint_weights = tf.ones_like(keypoints[:, :, 0]) else: keypoint_weights = tf.gather( keypoint_weights, indices=keypoint_indices, axis=1) keypoint_weights = keypoint_weights * valid_mask return keypoints_absolute, keypoint_weights class CenterNetCenterHeatmapTargetAssigner(object): """Wrapper to compute the object center heatmap.""" def __init__(self, stride, min_overlap=0.7, compute_heatmap_sparse=False, keypoint_class_id=None, keypoint_indices=None, keypoint_weights_for_center=None, box_heatmap_type='adaptive_gaussian', heatmap_exponent=1.0): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. min_overlap: The minimum IOU overlap that boxes need to have to not be penalized. compute_heatmap_sparse: bool, indicating whether or not to use the sparse version of the Op that computes the heatmap. The sparse version scales better with number of classes, but in some cases is known to cause OOM error. See (b/170989061). keypoint_class_id: int, the ID of the class (0-indexed) that contains the target keypoints to consider in this task. keypoint_indices: A list of integers representing the indices of the keypoints to be considered in this task. This is used to retrieve the subset of the keypoints from gt_keypoints that should be considered in this task. keypoint_weights_for_center: The keypoint weights used for calculating the location of object center. The number of weights need to be the same as the number of keypoints. The object center is calculated by the weighted mean of the keypoint locations. If not provided, the object center is determined by the center of the bounding box (default behavior). box_heatmap_type: str, the algorithm used to compute the box heatmap, used when calling the assign_center_targets_from_boxes method. Options are: 'adaptaive_gaussian': A box-size adaptive Gaussian from the original paper[1]. 'iou': IOU based heatmap target where each point is assigned an IOU based on its location, assuming that it produced a box centered at that point with the correct size. heatmap_exponent: float, The generated heatmap is exponentiated with this number. A number > 1 will result in the heatmap being more peaky and a number < 1 will cause the heatmap to be more spreadout. """ self._stride = stride self._min_overlap = min_overlap self._compute_heatmap_sparse = compute_heatmap_sparse self._keypoint_class_id = keypoint_class_id self._keypoint_indices = keypoint_indices self._keypoint_weights_for_center = keypoint_weights_for_center self._box_heatmap_type = box_heatmap_type self._heatmap_exponent = heatmap_exponent def assign_center_targets_from_boxes(self, height, width, gt_boxes_list, gt_classes_list, gt_weights_list=None, maximum_normalized_coordinate=1.1): """Computes the object center heatmap target. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. 
gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The box coordinates are expected in normalized coordinates. gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_weights_list: A list of float tensors with shape [num_boxes] representing the weight of each groundtruth detection box. maximum_normalized_coordinate: Maximum coordinate value to be considered as normalized, default to 1.1. This is used to check bounds during converting normalized coordinates to absolute coordinates. Returns: heatmap: A Tensor of size [batch_size, output_height, output_width, num_classes] representing the per class center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. """ out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. Each returned # tensor has shape of [out_height, out_width] (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) heatmaps = [] if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) # TODO(vighneshb) Replace the for loop with a batch version. for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list, gt_weights_list): boxes = box_list.BoxList(boxes) # Convert the box coordinates to absolute output image dimension space. boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1), maximum_normalized_coordinate=maximum_normalized_coordinate) # Get the box center coordinates. Each returned tensors have the shape of # [num_instances] (y_center, x_center, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() # Compute the sigma from box size. The tensor shape: [num_instances]. sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, self._min_overlap) # Apply the Gaussian kernel to the center coordinates. Returned heatmap # has shape of [out_height, out_width, num_classes] if self._box_heatmap_type == 'adaptive_gaussian': heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=y_center, x_coordinates=x_center, sigma=sigma, channel_onehot=class_targets, channel_weights=weights, sparse=self._compute_heatmap_sparse) elif self._box_heatmap_type == 'iou': heatmap = ta_utils.coordinates_to_iou(y_grid, x_grid, boxes, class_targets, weights) else: raise ValueError(f'Unknown heatmap type - {self._box_heatmap_type}') heatmap = tf.stop_gradient(heatmap) heatmaps.append(heatmap) # Return the stacked heatmaps over the batch. stacked_heatmaps = tf.stack(heatmaps, axis=0) return (tf.pow(stacked_heatmaps, self._heatmap_exponent) if self._heatmap_exponent != 1.0 else stacked_heatmaps) def assign_center_targets_from_keypoints(self, height, width, gt_classes_list, gt_keypoints_list, gt_weights_list=None, gt_keypoints_weights_list=None): """Computes the object center heatmap target using keypoint locations. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. 
gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_keypoints_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The box coordinates are expected in normalized coordinates. gt_weights_list: A list of float tensors with shape [num_boxes] representing the weight of each groundtruth detection box. gt_keypoints_weights_list: [Optional] a list of 3D tf.float32 tensors of shape [num_instances, num_total_keypoints] representing the weights of each keypoints. If not provided, then all not NaN keypoints will be equally weighted. Returns: heatmap: A Tensor of size [batch_size, output_height, output_width, num_classes] representing the per class center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. """ assert (self._keypoint_weights_for_center is not None and self._keypoint_class_id is not None and self._keypoint_indices is not None) out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. Each returned # tensor has shape of [out_height, out_width] (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width) heatmaps = [] if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) for keypoints, classes, kp_weights, weights in zip( gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=out_height, out_width=out_width, keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._keypoint_class_id, keypoint_indices=self._keypoint_indices) # _, num_keypoints, _ = ( # shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # Update the keypoint weights by the specified keypoints weights. kp_loc_weights = tf.constant( self._keypoint_weights_for_center, dtype=tf.float32) updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :] # Obtain the sum of the weights for each instance. # instance_weight_sum has shape: [num_instance]. instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1) # Weight the keypoint coordinates by updated_kp_weights. # weighted_keypoints has shape: [num_instance, num_keypoints, 2] weighted_keypoints = keypoints_absolute * tf.expand_dims( updated_kp_weights, axis=2) # Compute the mean of the keypoint coordinates over the weighted # keypoints. # keypoint_mean has shape: [num_instance, 2] keypoint_mean = tf.math.divide( tf.reduce_sum(weighted_keypoints, axis=1), tf.expand_dims(instance_weight_sum, axis=-1)) # Replace the NaN values (due to divided by zeros in the above operation) # by 0.0 where the sum of instance weight is zero. # keypoint_mean has shape: [num_instance, 2] keypoint_mean = tf.where( tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0, keypoint_mean, tf.zeros_like(keypoint_mean)) # Compute the distance from each keypoint to the mean location using # broadcasting and weighted by updated_kp_weights. 
# keypoint_dist has shape: [num_instance, num_keypoints] keypoint_mean = tf.expand_dims(keypoint_mean, axis=1) keypoint_dist = tf.math.sqrt( tf.reduce_sum( tf.math.square(keypoints_absolute - keypoint_mean), axis=2)) keypoint_dist = keypoint_dist * updated_kp_weights # Compute the average of the distances from each keypoint to the mean # location and update the average value by zero when the instance weight # is zero. # avg_radius has shape: [num_instance] avg_radius = tf.math.divide( tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum) avg_radius = tf.where( instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius)) # Update the class instance weight. If the instance doesn't contain enough # valid keypoint values (i.e. instance_weight_sum == 0.0), then set the # instance weight to zero. # updated_class_weights has shape: [num_instance] updated_class_weights = tf.where( instance_weight_sum > 0.0, weights, tf.zeros_like(weights)) # Compute the sigma from average distance. We use 2 * average distance to # to approximate the width/height of the bounding box. # sigma has shape: [num_instances]. sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius, self._min_overlap) # Apply the Gaussian kernel to the center coordinates. Returned heatmap # has shape of [out_height, out_width, num_classes] heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=keypoint_mean[:, 0, 0], x_coordinates=keypoint_mean[:, 0, 1], sigma=sigma, channel_onehot=classes, channel_weights=updated_class_weights, sparse=self._compute_heatmap_sparse) heatmaps.append(heatmap) # Return the stacked heatmaps over the batch. return tf.stack(heatmaps, axis=0) class CenterNetBoxTargetAssigner(object): """Wrapper to compute target tensors for the object detection task. This class has methods that take as input a batch of ground truth tensors (in the form of a list) and return the targets required to train the object detection task. """ def __init__(self, stride): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. """ self._stride = stride def assign_size_and_offset_targets(self, height, width, gt_boxes_list, gt_weights_list=None, maximum_normalized_coordinate=1.1): """Returns the box height/width and center offset targets and their indices. The returned values are expected to be used with predicted tensors of size (batch_size, height//self._stride, width//self._stride, 2). The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_weights_list: A list of tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. maximum_normalized_coordinate: Maximum coordinate value to be considered as normalized, default to 1.1. This is used to check bounds during converting normalized coordinates to absolute coordinates. Returns: batch_indices: an integer tensor of shape [num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. 
The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. batch_box_height_width: a float tensor of shape [num_boxes, 2] holding expected height and width of each box in the output space. batch_offsets: a float tensor of shape [num_boxes, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_boxes] indicating the weight of each prediction. """ if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) batch_indices = [] batch_box_height_width = [] batch_weights = [] batch_offsets = [] for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1), maximum_normalized_coordinate=maximum_normalized_coordinate) # Get the box center coordinates. Each returned tensors have the shape of # [num_boxes] (y_center, x_center, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() num_boxes = tf.shape(x_center) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_boxes, 2] # indices: [num_boxes, 2] (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_center, x_source=x_center) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Shape of [num_boxes, 1] integer tensor filled with current batch index. batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_box_height_width.append( tf.stack([boxes_height, boxes_width], axis=1)) batch_weights.append(weights) batch_offsets.append(offsets) batch_indices = tf.concat(batch_indices, axis=0) batch_box_height_width = tf.concat(batch_box_height_width, axis=0) batch_weights = tf.concat(batch_weights, axis=0) batch_offsets = tf.concat(batch_offsets, axis=0) return (batch_indices, batch_box_height_width, batch_offsets, batch_weights) # TODO(yuhuic): Update this class to handle the instance/keypoint weights. # Currently those weights are used as "mask" to indicate whether an # instance/keypoint should be considered or not (expecting only either 0 or 1 # value). In reality, the weights can be any value and this class should handle # those values properly. class CenterNetKeypointTargetAssigner(object): """Wrapper to compute target tensors for the CenterNet keypoint estimation. This class has methods that take as input a batch of groundtruth tensors (in the form of a list) and returns the targets required to train the CenterNet model for keypoint estimation. Specifically, the class methods expect the groundtruth in the following formats (consistent with the standard Object Detection API). Note that usually the groundtruth tensors are packed with a list which represents the batch dimension: gt_classes_list: [Required] a list of 2D tf.float32 one-hot (or k-hot) tensors of shape [num_instances, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of shape [num_instances, num_total_keypoints, 2] containing keypoint coordinates. Note that the "num_total_keypoints" should be the sum of the num_keypoints over all possible keypoint types, e.g. human pose, face. 
    For example, if a dataset contains both 17 human pose keypoints and 5
    face keypoints, then num_total_keypoints = 17 + 5 = 22. If an instance
    contains only a subset of keypoints (e.g. human pose keypoints but not
    face keypoints), the face keypoints will be filled with zeros. Also note
    that keypoints are assumed to be provided in normalized coordinates and
    missing keypoints should be encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 3D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weight of
    each keypoint. If not provided, then all non-NaN keypoints will be
    equally weighted.
  gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
    [num_instances, 4] containing coordinates of the groundtruth boxes.
    Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
    assumed to be normalized and clipped relative to the image window with
    y_min <= y_max and x_min <= x_max. Note that the boxes are only used to
    compute the center targets but are not considered as required output of
    the keypoint task. If the boxes are not provided, the center targets will
    be inferred from the keypoints [not implemented yet].
  gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
    [num_instances] containing weights for groundtruth boxes. Only useful
    when gt_boxes_list is also provided.
  """

  def __init__(self,
               stride,
               class_id,
               keypoint_indices,
               keypoint_std_dev=None,
               per_keypoint_offset=False,
               peak_radius=0,
               compute_heatmap_sparse=False,
               per_keypoint_depth=False):
    """Initializes a CenterNet keypoints target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
      class_id: int, the ID of the class (0-indexed) that contains the target
        keypoints to consider in this task. For example, if the task is human
        pose estimation, the class id should correspond to the "human" class.
      keypoint_indices: A list of integers representing the indices of the
        keypoints to be considered in this task. This is used to retrieve the
        subset of the keypoints from gt_keypoints that should be considered
        in this task.
      keypoint_std_dev: A list of floats representing the standard deviation
        of the Gaussian kernel used to generate the keypoint heatmap (in the
        unit of output pixels). It provides the flexibility of using
        different sizes of Gaussian kernel for each keypoint type. If not
        provided, then all standard deviations will be the same as the
        default value (10.0 in the output pixel space). If provided, the
        length of keypoint_std_dev needs to be the same as the length of
        keypoint_indices, indicating the standard deviation of each keypoint
        type.
      per_keypoint_offset: boolean, indicating whether to assign offsets for
        each keypoint channel. If set False, the output offset target will
        have the shape [batch_size, out_height, out_width, 2]. If set True,
        the output offset target will have the shape [batch_size, out_height,
        out_width, 2 * num_keypoints].
      peak_radius: int, the radius (in the unit of output pixels) around the
        heatmap peak to assign the offset targets.
      compute_heatmap_sparse: bool, indicating whether or not to use the
        sparse version of the Op that computes the heatmap. The sparse
        version scales better with the number of keypoint types, but in some
        cases is known to cause an OOM error. See (b/170989061).
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
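
    Example (an illustrative sketch only; the configuration values below are
    hypothetical and not taken from any config in this codebase):

      # Keypoint targets for a 17-keypoint human-pose task where the "human"
      # class has index 0 and the network output stride is 4.
      kp_target_assigner = CenterNetKeypointTargetAssigner(
          stride=4,
          class_id=0,
          keypoint_indices=list(range(17)),
          peak_radius=1)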
""" self._stride = stride self._class_id = class_id self._keypoint_indices = keypoint_indices self._per_keypoint_offset = per_keypoint_offset self._per_keypoint_depth = per_keypoint_depth self._peak_radius = peak_radius self._compute_heatmap_sparse = compute_heatmap_sparse if keypoint_std_dev is None: self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] * len(keypoint_indices)) else: assert len(keypoint_indices) == len(keypoint_std_dev) self._keypoint_std_dev = keypoint_std_dev def assign_keypoint_heatmap_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list=None, gt_weights_list=None, gt_boxes_list=None): """Returns the keypoint heatmap targets for the CenterNet model. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of float tensors with shape [num_instances, num_total_keypoints, 2]. See class-level description for more detail. gt_classes_list: A list of float tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See class-level description for more detail. If provided, the keypoint standard deviations will be scaled based on the box sizes. Returns: heatmap: A float tensor of shape [batch_size, output_height, output_width, num_keypoints] representing the per keypoint type center heatmap. output_height and output_width are computed by dividing the input height and width by the stride specified during initialization. Note that the "num_keypoints" is defined by the length of keypoint_indices, which is not necessarily equal to "num_total_keypoints". num_instances_batch: A 2D int tensor of shape [batch_size, num_keypoints] representing number of instances for each keypoint type. valid_mask: A float tensor with shape [batch_size, output_height, output_width, num_keypoints] where all values within the regions of the blackout boxes are 0.0 and 1.0 else where. Note that the blackout boxes are per keypoint type and are blacked out if the keypoint visibility/weight (of the corresponding keypoint type) is zero. """ out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32) out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32) # Compute the yx-grid to be used to generate the heatmap. 
Each returned # tensor has shape of [out_height, out_width] y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_boxes_list is None: gt_boxes_list = [None] * len(gt_keypoints_list) heatmaps = [] num_instances_list = [] valid_mask_list = [] for keypoints, classes, kp_weights, weights, boxes in zip( gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list, gt_boxes_list): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=out_height, out_width=out_width, keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # A tensor of shape [num_instances, num_keypoints] with # each element representing the type dimension for each corresponding # keypoint: # [[0, 1, ..., k-1], # [0, 1, ..., k-1], # : # [0, 1, ..., k-1]] keypoint_types = tf.tile( input=tf.expand_dims(tf.range(num_keypoints), axis=0), multiples=[num_instances, 1]) # A tensor of shape [num_instances, num_keypoints] with # each element representing the sigma of the Gaussian kernel for each # keypoint. keypoint_std_dev = tf.tile( input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0), multiples=[num_instances, 1]) # If boxes is not None, then scale the standard deviation based on the # size of the object bounding boxes similar to object center heatmap. if boxes is not None: boxes = box_list.BoxList(boxes) # Convert the box coordinates to absolute output image dimension space. boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box height and width. Each returned tensors have the shape # of [num_instances] (_, _, boxes_height, boxes_width) = boxes.get_center_coordinates_and_sizes() # Compute the sigma from box size. The tensor shape: [num_instances]. sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7) keypoint_std_dev = keypoint_std_dev * tf.stack( [sigma] * num_keypoints, axis=1) # Generate the per-keypoint type valid region mask to ignore regions # with keypoint weights equal to zeros (e.g. visibility is 0). # shape of valid_mask: [out_height, out_width, num_keypoints] kp_weight_list = tf.unstack(kp_weights, axis=1) valid_mask_channel_list = [] for kp_weight in kp_weight_list: blackout = kp_weight < 1e-3 valid_mask_channel_list.append( ta_utils.blackout_pixel_weights_by_box_regions( out_height, out_width, boxes.get(), blackout)) valid_mask = tf.stack(valid_mask_channel_list, axis=2) valid_mask_list.append(valid_mask) # Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap # has shape of [out_height, out_width, num_keypoints]. 
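      # Note that the instance and keypoint dimensions are flattened below, so
      # every (instance, keypoint) pair contributes an independent Gaussian
      # peak to the channel selected by its keypoint type.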
heatmap = ta_utils.coordinates_to_heatmap( y_grid=y_grid, x_grid=x_grid, y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]), sigma=tf.keras.backend.flatten(keypoint_std_dev), channel_onehot=tf.one_hot( tf.keras.backend.flatten(keypoint_types), depth=num_keypoints), channel_weights=tf.keras.backend.flatten(kp_weights)) num_instances_list.append( tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32)) heatmaps.append(heatmap) return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0), tf.stack(valid_mask_list, axis=0)) def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors): """Gets keypoint type index tensor. The function prepares the tensor of keypoint indices with shape [num_instances, num_keypoints, num_neighbors]. Each element represents the keypoint type index for each corresponding keypoint and tiled along the 3rd axis: [[0, 1, ..., num_keypoints - 1], [0, 1, ..., num_keypoints - 1], : [0, 1, ..., num_keypoints - 1]] Args: num_instances: int, the number of instances, used to define the 1st dimension. num_keypoints: int, the number of keypoint types, used to define the 2nd dimension. num_neighbors: int, the number of neighborhood pixels to consider for each keypoint, used to define the 3rd dimension. Returns: A integer tensor of shape [num_instances, num_keypoints, num_neighbors]. """ keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis] tiled_keypoint_types = tf.tile(keypoint_types, multiples=[num_instances, 1, num_neighbors]) return tiled_keypoint_types def assign_keypoints_offset_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the offsets and indices of the keypoints for location refinement. The returned values are used to refine the location of each keypoints in the heatmap. The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of tensors with shape [num_instances, num_total_keypoints]. See class-level description for more detail. gt_classes_list: A list of tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_total_instances, 3] (or [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column corresponds to the channel dimension (if 'per_keypoint_offset' is set True). batch_offsets: a float tensor of shape [num_total_instances, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_total_instances] indicating the weight of each prediction. 
Note that num_total_instances = batch_size * num_instances * num_keypoints * num_neighbors """ batch_indices = [] batch_offsets = [] batch_weights = [] if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) for i, (keypoints, classes, kp_weights, weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # [num_instances * num_keypoints] y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) # All keypoint coordinates and their neighbors: # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), y_source, x_source, self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) # Update the valid keypoint weights. # [num_instance * num_keypoints, num_neighbors] valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_instances * num_keypoints, num_neighbors, 2] # indices: [num_instances * num_keypoints, num_neighbors, 2] offsets, indices = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=y_source, x_target=x_source) # Reshape to: # offsets: [num_instances * num_keypoints * num_neighbors, 2] # indices: [num_instances * num_keypoints * num_neighbors, 2] offsets = tf.reshape(offsets, [-1, 2]) indices = tf.reshape(indices, [-1, 2]) # Prepare the batch indices to be prepended. batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) if self._per_keypoint_offset: tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) else: batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_offsets.append(offsets) batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if # 'per_keypoint_offset' is set to True. 
batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2] batch_offsets = tf.concat(batch_offsets, axis=0) return (batch_indices, batch_offsets, batch_weights) def assign_keypoints_depth_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_keypoint_depths_list, gt_keypoint_depth_weights_list, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the target depths of the keypoints. The returned values are the relative depth information of each keypoints. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of tensors with shape [num_instances, num_total_keypoints, 2]. See class-level description for more detail. gt_classes_list: A list of tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_keypoint_depths_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the relative depth of the keypoints. gt_keypoint_depth_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weights of the relative depth. gt_keypoints_weights_list: A list of tensors with shape [num_instances, num_total_keypoints] corresponding to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_total_instances, 3] (or [num_total_instances, 4] if 'per_keypoint_depth' is set True) holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column corresponds to the channel dimension (if 'per_keypoint_offset' is set True). batch_depths: a float tensor of shape [num_total_instances, 1] (or [num_total_instances, num_keypoints] if per_keypoint_depth is set True) indicating the target depth of each keypoint. batch_weights: a float tensor of shape [num_total_instances] indicating the weight of each prediction. 
Note that num_total_instances = batch_size * num_instances * num_keypoints * num_neighbors """ batch_indices = [] batch_weights = [] batch_depths = [] if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * len(gt_keypoints_list) if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) if gt_keypoint_depths_list is None: gt_keypoint_depths_list = [None] * len(gt_classes_list) for i, (keypoints, classes, kp_weights, weights, keypoint_depths, keypoint_depth_weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list, gt_weights_list, gt_keypoint_depths_list, gt_keypoint_depth_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # [num_instances * num_keypoints] y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0]) x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1]) # All keypoint coordinates and their neighbors: # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), y_source, x_source, self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) # Update the valid keypoint weights. # [num_instance * num_keypoints, num_neighbors] valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # indices: [num_instances * num_keypoints, num_neighbors, 2] _, indices = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=y_source, x_target=x_source) # Reshape to: # indices: [num_instances * num_keypoints * num_neighbors, 2] indices = tf.reshape(indices, [-1, 2]) # Gather the keypoint depth from corresponding keypoint indices: # [num_instances, num_keypoints] keypoint_depths = tf.gather( keypoint_depths, self._keypoint_indices, axis=1) # Tile the depth target to surrounding pixels. # [num_instances, num_keypoints, num_neighbors] tiled_keypoint_depths = tf.tile( tf.expand_dims(keypoint_depths, axis=-1), multiples=[1, 1, num_neighbors]) # [num_instances, num_keypoints] keypoint_depth_weights = tf.gather( keypoint_depth_weights, self._keypoint_indices, axis=1) # [num_instances, num_keypoints, num_neighbors] keypoint_depth_weights = tf.tile( tf.expand_dims(keypoint_depth_weights, axis=-1), multiples=[1, 1, num_neighbors]) # Update the weights of keypoint depth by the weights of the keypoints. # A keypoint depth target is valid only if its corresponding keypoint # target is also valid. # [num_instances, num_keypoints, num_neighbors] tiled_depth_weights = ( tf.reshape(valid_keypoints, [num_instances, num_keypoints, num_neighbors]) * keypoint_depth_weights) invalid_depths = tf.logical_or( tf.math.is_nan(tiled_depth_weights), tf.math.is_nan(tiled_keypoint_depths)) # Assign zero values and weights to NaN values. 
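      # (A NaN can come either from a missing groundtruth depth value or from
      # a NaN depth weight; both cases are zeroed out below so that they do
      # not contribute to the loss.)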
final_keypoint_depths = tf.where(invalid_depths, tf.zeros_like(tiled_keypoint_depths), tiled_keypoint_depths) final_keypoint_depth_weights = tf.where( invalid_depths, tf.zeros_like(tiled_depth_weights), tiled_depth_weights) # [num_instances * num_keypoints * num_neighbors, 1] batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1])) # Prepare the batch indices to be prepended. batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) if self._per_keypoint_depth: tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) else: batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_weights.append( tf.keras.backend.flatten(final_keypoint_depth_weights)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if # 'per_keypoint_offset' is set to True. batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1] batch_depths = tf.concat(batch_depths, axis=0) return (batch_indices, batch_depths, batch_weights) def assign_joint_regression_targets(self, height, width, gt_keypoints_list, gt_classes_list, gt_boxes_list=None, gt_keypoints_weights_list=None, gt_weights_list=None): """Returns the joint regression from center grid to keypoints. The joint regression is used as the grouping cue from the estimated keypoints to instance center. The offsets are the vectors from the floored object center coordinates to the keypoint coordinates. Args: height: int, height of input to the CenterNet model. This is used to determine the height of the output. width: int, width of the input to the CenterNet model. This is used to determine the width of the output. gt_keypoints_list: A list of float tensors with shape [num_instances, num_total_keypoints]. See class-level description for more detail. gt_classes_list: A list of float tensors with shape [num_instances, num_classes]. See class-level description for more detail. gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See class-level description for more detail. If provided, then the center targets will be computed based on the center of the boxes. gt_keypoints_weights_list: A list of float tensors with shape [num_instances, num_total_keypoints] representing to the weight of each keypoint. gt_weights_list: A list of float tensors with shape [num_instances]. See class-level description for more detail. Returns: batch_indices: an integer tensor of shape [num_instances, 4] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively, the last dimension refers to the keypoint type dimension. batch_offsets: a float tensor of shape [num_instances, 2] holding the expected y and x offset of each box in the output space. batch_weights: a float tensor of shape [num_instances] indicating the weight of each prediction. 
Note that num_total_instances = batch_size * num_instances * num_keypoints Raises: NotImplementedError: currently the object center coordinates need to be computed from groundtruth bounding boxes. The functionality of generating the object center coordinates from keypoints is not implemented yet. """ batch_indices = [] batch_offsets = [] batch_weights = [] batch_size = len(gt_keypoints_list) if gt_keypoints_weights_list is None: gt_keypoints_weights_list = [None] * batch_size if gt_boxes_list is None: gt_boxes_list = [None] * batch_size if gt_weights_list is None: gt_weights_list = [None] * len(gt_classes_list) for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate( zip(gt_keypoints_list, gt_classes_list, gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)): keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights( out_height=tf.maximum(height // self._stride, 1), out_width=tf.maximum(width // self._stride, 1), keypoints=keypoints, class_onehot=classes, class_weights=weights, keypoint_weights=kp_weights, class_id=self._class_id, keypoint_indices=self._keypoint_indices) num_instances, num_keypoints, _ = ( shape_utils.combined_static_and_dynamic_shape(keypoints_absolute)) # If boxes are provided, compute the joint center from it. if boxes is not None: # Compute joint center from boxes. boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes() else: # TODO(yuhuic): Add the logic to generate object centers from keypoints. raise NotImplementedError(( 'The functionality of generating object centers from keypoints is' ' not implemented yet. Please provide groundtruth bounding boxes.' )) # Tile the yx center coordinates to be the same shape as keypoints. y_center_tiled = tf.tile( tf.reshape(y_center, shape=[num_instances, 1]), multiples=[1, num_keypoints]) x_center_tiled = tf.tile( tf.reshape(x_center, shape=[num_instances, 1]), multiples=[1, num_keypoints]) # [num_instance * num_keypoints, num_neighbors] (y_source_neighbors, x_source_neighbors, valid_sources) = ta_utils.get_surrounding_grids( tf.cast(tf.maximum(height // self._stride, 1), tf.float32), tf.cast(tf.maximum(width // self._stride, 1), tf.float32), tf.keras.backend.flatten(y_center_tiled), tf.keras.backend.flatten(x_center_tiled), self._peak_radius) _, num_neighbors = shape_utils.combined_static_and_dynamic_shape( y_source_neighbors) valid_keypoints = tf.cast( valid_sources, dtype=tf.float32) * tf.stack( [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_instances * num_keypoints, 2] # indices: [num_instances * num_keypoints, 2] (offsets, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_source_neighbors, x_source=x_source_neighbors, y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]), x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1])) # Reshape to: # offsets: [num_instances * num_keypoints * num_neighbors, 2] # indices: [num_instances * num_keypoints * num_neighbors, 2] offsets = tf.reshape(offsets, [-1, 2]) indices = tf.reshape(indices, [-1, 2]) # keypoint type tensor: [num_instances, num_keypoints, num_neighbors]. 
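      # These type indices become the fourth column of batch_indices below, so
      # that each regression target is looked up in its own keypoint-type
      # channel of the prediction tensor.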
tiled_keypoint_types = self._get_keypoint_types( num_instances, num_keypoints, num_neighbors) batch_index = tf.fill( [num_instances * num_keypoints * num_neighbors, 1], i) batch_indices.append( tf.concat([batch_index, indices, tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1)) batch_offsets.append(offsets) batch_weights.append(tf.keras.backend.flatten(valid_keypoints)) # Concatenate the tensors in the batch in the first dimension: # shape: [batch_size * num_instances * num_keypoints, 4] batch_indices = tf.concat(batch_indices, axis=0) # shape: [batch_size * num_instances * num_keypoints] batch_weights = tf.concat(batch_weights, axis=0) # shape: [batch_size * num_instances * num_keypoints, 2] batch_offsets = tf.concat(batch_offsets, axis=0) return (batch_indices, batch_offsets, batch_weights) def _resize_masks(masks, height, width, method): # Resize segmentation masks to conform to output dimensions. Use TF2 # image resize because TF1's version is buggy: # https://yaqs.corp.google.com/eng/q/4970450458378240 masks = tf2.image.resize( masks[:, :, :, tf.newaxis], size=(height, width), method=method) return masks[:, :, :, 0] class CenterNetMaskTargetAssigner(object): """Wrapper to compute targets for segmentation masks.""" def __init__(self, stride, boxes_scale=1.0): """Constructor. Args: stride: The stride of the network. Targets are assigned at the output stride. boxes_scale: Scale to apply to boxes before producing mask weights. This is meant to ensure the full object region is properly weighted prior to applying loss. A value of ~1.05 is typically applied when object regions should be blacked out (perhaps because valid groundtruth masks are not present). """ self._stride = stride self._boxes_scale = boxes_scale def assign_segmentation_targets( self, gt_masks_list, gt_classes_list, gt_boxes_list=None, gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR): """Computes the segmentation targets. This utility produces a semantic segmentation mask for each class, starting with whole image instance segmentation masks. Effectively, each per-class segmentation target is the union of all masks from that class. Args: gt_masks_list: A list of float tensors with shape [num_boxes, input_height, input_width] with values in {0, 1} representing instance masks for each object. gt_classes_list: A list of float tensors with shape [num_boxes, num_classes] representing the one-hot encoded class labels for each box in the gt_boxes_list. gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4] with normalized boxes corresponding to each mask. The boxes are used to spatially allocate mask weights. gt_mask_weights_list: An optional list of float tensors with shape [num_boxes] with weights for each mask. If a mask has a zero weight, it indicates that the box region associated with the mask should not contribute to the loss. If not provided, will use a per-pixel weight of 1. mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use when resizing masks from input resolution to output resolution. Returns: segmentation_targets: An int32 tensor of size [batch_size, output_height, output_width, num_classes] representing the class of each location in the output space. segmentation_weight: A float32 tensor of size [batch_size, output_height, output_width] indicating the loss weight to apply at each location. 
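
    For example, if two instances of the same class overlap, the target for
    that class is the pixelwise maximum (union) of their resized masks, and
    pixels inside (scaled) boxes whose mask weight is zero receive a
    segmentation weight of 0.0 so that they do not contribute to the loss.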
""" _, num_classes = shape_utils.combined_static_and_dynamic_shape( gt_classes_list[0]) _, input_height, input_width = ( shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) output_height = tf.maximum(input_height // self._stride, 1) output_width = tf.maximum(input_width // self._stride, 1) if gt_boxes_list is None: gt_boxes_list = [None] * len(gt_masks_list) if gt_mask_weights_list is None: gt_mask_weights_list = [None] * len(gt_masks_list) segmentation_targets_list = [] segmentation_weights_list = [] for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip( gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list): if gt_boxes is not None and gt_mask_weights is not None: boxes = box_list.BoxList(gt_boxes) # Convert the box coordinates to absolute output image dimension space. boxes_absolute = box_list_ops.to_absolute_coordinates( boxes, output_height, output_width) # Generate a segmentation weight that applies mask weights in object # regions. blackout = gt_mask_weights <= 0 segmentation_weight_for_image = ( ta_utils.blackout_pixel_weights_by_box_regions( output_height, output_width, boxes_absolute.get(), blackout, weights=gt_mask_weights, boxes_scale=self._boxes_scale)) segmentation_weights_list.append(segmentation_weight_for_image) else: segmentation_weights_list.append(tf.ones((output_height, output_width), dtype=tf.float32)) gt_masks = _resize_masks(gt_masks, output_height, output_width, mask_resize_method) gt_masks = gt_masks[:, :, :, tf.newaxis] gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes]) # Shape: [h, w, num_classes]. segmentations_for_image = tf.reduce_max( gt_masks * gt_classes_reshaped, axis=0) # Avoid the case where max of an empty array is -inf. segmentations_for_image = tf.maximum(segmentations_for_image, 0.0) segmentation_targets_list.append(segmentations_for_image) segmentation_target = tf.stack(segmentation_targets_list, axis=0) segmentation_weight = tf.stack(segmentation_weights_list, axis=0) return segmentation_target, segmentation_weight class CenterNetDensePoseTargetAssigner(object): """Wrapper to compute targets for DensePose task.""" def __init__(self, stride, num_parts=24): self._stride = stride self._num_parts = num_parts def assign_part_and_coordinate_targets(self, height, width, gt_dp_num_points_list, gt_dp_part_ids_list, gt_dp_surface_coords_list, gt_weights_list=None): """Returns the DensePose part_id and coordinate targets and their indices. The returned values are expected to be used with predicted tensors of size (batch_size, height//self._stride, width//self._stride, 2). The predicted values at the relevant indices can be retrieved with the get_batch_predictions_from_indices function. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes] containing the number of DensePose sampled points per box. gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape [num_boxes, max_sampled_points] containing the DensePose part ids (0-indexed) for each sampled point. Note that there may be padding, as boxes may contain a different number of sampled points. gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape [num_boxes, max_sampled_points, 4] containing the DensePose surface coordinates (normalized) for each sampled point. Note that there may be padding. 
gt_weights_list: A list of 1-D tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [num_total_points, 4] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. The fourth column is the part index. batch_part_ids: an int tensor of shape [num_total_points, num_parts] holding 1-hot encodings of parts for each sampled point. batch_surface_coords: a float tensor of shape [num_total_points, 2] holding the expected (v, u) coordinates for each sampled point. batch_weights: a float tensor of shape [num_total_points] indicating the weight of each prediction. Note that num_total_points = batch_size * num_boxes * max_sampled_points. """ if gt_weights_list is None: gt_weights_list = [None] * len(gt_dp_num_points_list) batch_indices = [] batch_part_ids = [] batch_surface_coords = [] batch_weights = [] for i, (num_points, part_ids, surface_coords, weights) in enumerate( zip(gt_dp_num_points_list, gt_dp_part_ids_list, gt_dp_surface_coords_list, gt_weights_list)): num_boxes, max_sampled_points = ( shape_utils.combined_static_and_dynamic_shape(part_ids)) part_ids_flattened = tf.reshape(part_ids, [-1]) part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts) # Get DensePose coordinates in the output space. surface_coords_abs = densepose_ops.to_absolute_coordinates( surface_coords, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4]) # Each tensor has shape [num_boxes * max_sampled_points]. yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1) # Get the indices (in output space) for the DensePose coordinates. Note # that if self._stride is larger than 1, this will have the effect of # reducing spatial resolution of the groundtruth points. indices_y = tf.cast(yabs, tf.int32) indices_x = tf.cast(xabs, tf.int32) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Create per-point weights. weights_per_point = tf.reshape( tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]), shape=[-1]) # Mask out invalid (i.e. padded) DensePose points. num_points_tiled = tf.tile(num_points[:, tf.newaxis], multiples=[1, max_sampled_points]) range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :], multiples=[num_boxes, 1]) valid_points = tf.math.less(range_tiled, num_points_tiled) valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32) weights_per_point = weights_per_point * valid_points # Shape of [num_boxes * max_sampled_points] integer tensor filled with # current batch index. batch_index = i * tf.ones_like(indices_y, dtype=tf.int32) batch_indices.append( tf.stack([batch_index, indices_y, indices_x, part_ids_flattened], axis=1)) batch_part_ids.append(part_ids_one_hot) batch_surface_coords.append(tf.stack([v, u], axis=1)) batch_weights.append(weights_per_point) batch_indices = tf.concat(batch_indices, axis=0) batch_part_ids = tf.concat(batch_part_ids, axis=0) batch_surface_coords = tf.concat(batch_surface_coords, axis=0) batch_weights = tf.concat(batch_weights, axis=0) return batch_indices, batch_part_ids, batch_surface_coords, batch_weights class CenterNetTrackTargetAssigner(object): """Wrapper to compute targets for tracking task. 
Reference paper: A Simple Baseline for Multi-Object Tracking [1] [1]: https://arxiv.org/abs/2004.01888 """ def __init__(self, stride, num_track_ids): self._stride = stride self._num_track_ids = num_track_ids def assign_track_targets(self, height, width, gt_track_ids_list, gt_boxes_list, gt_weights_list=None): """Computes the track ID targets. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_track_ids_list: A list of 1-D tensors with shape [num_boxes] corresponding to the track ID of each groundtruth detection box. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_weights_list: A list of 1-D tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [batch_size, num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. batch_weights: a float tensor of shape [batch_size, num_boxes] indicating the weight of each prediction. track_id_targets: An int32 tensor of size [batch_size, num_boxes, num_track_ids] containing the one-hot track ID vector of each groundtruth detection box. """ track_id_targets = tf.one_hot( gt_track_ids_list, depth=self._num_track_ids, axis=-1) if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) batch_indices = [] batch_weights = [] for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)): boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box center coordinates. Each returned tensors have the shape of # [num_boxes] (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes() num_boxes = tf.shape(x_center) # Compute the indices of the box centers. Shape: # indices: [num_boxes, 2] (_, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_center, x_source=x_center) # Assign ones if weights are not provided. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) # Shape of [num_boxes, 1] integer tensor filled with current batch index. batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_weights.append(weights) batch_indices = tf.stack(batch_indices, axis=0) batch_weights = tf.stack(batch_weights, axis=0) return batch_indices, batch_weights, track_id_targets def filter_mask_overlap_min_area(masks): """If a pixel belongs to 2 instances, remove it from the larger instance.""" num_instances = tf.shape(masks)[0] def _filter_min_area(): """Helper function to filter non empty masks.""" areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True) per_pixel_area = masks * areas # Make sure background is ignored in argmin. 
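    # Background entries (mask == 0) are set to the dtype's maximum value so
    # that tf.argmin below always selects an instance that actually covers the
    # pixel; among overlapping instances, the one with the smallest area wins.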
    per_pixel_area = (masks * per_pixel_area +
                      (1 - masks) * per_pixel_area.dtype.max)
    min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)

    filtered_masks = (
        tf.range(num_instances)[:, tf.newaxis, tf.newaxis] ==
        min_index[tf.newaxis, :, :]
    )

    return tf.cast(filtered_masks, tf.float32) * masks

  return tf.cond(num_instances > 0, _filter_min_area, lambda: masks)


def filter_mask_overlap(masks, method='min_area'):

  if method == 'min_area':
    return filter_mask_overlap_min_area(masks)
  else:
    raise ValueError('Unknown mask overlap filter type - {}'.format(method))


class CenterNetCornerOffsetTargetAssigner(object):
  """Wrapper to compute corner offsets for boxes using masks."""

  def __init__(self, stride, overlap_resolution='min_area'):
    """Initializes the corner offset target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
      overlap_resolution: string, specifies how we handle overlapping
        instance masks. Currently only 'min_area' is supported which assigns
        overlapping pixels to the instance with the minimum area.
    """
    self._stride = stride
    self._overlap_resolution = overlap_resolution

  def assign_corner_offset_targets(
      self, gt_boxes_list, gt_masks_list):
    """Computes the corner offset targets and foreground map.

    For each pixel that is part of any object's foreground, this function
    computes the relative offsets to the top-left and bottom-right corners of
    that instance's bounding box. It also returns a foreground map to
    indicate which pixels contain valid corner offsets.

    Args:
      gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
        representing the groundtruth detection bounding boxes for each sample
        in the batch. The coordinates are expected in normalized coordinates.
      gt_masks_list: A list of float tensors with shape [num_boxes,
        input_height, input_width] with values in {0, 1} representing
        instance masks for each object.

    Returns:
      corner_offsets: A float tensor of shape [batch_size, height, width, 4]
        containing, in order, the (y, x) offsets to the top left corner and
        the (y, x) offsets to the bottom right corner for each foreground
        pixel.
      foreground: A float tensor of shape [batch_size, height, width] in which
        each pixel is set to 1 if it is a part of any instance's foreground
        (and thus contains valid corner offsets) and 0 otherwise.
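
    For example, a foreground pixel at output location (y, x) that belongs to
    an instance whose box corners are (ymin, xmin, ymax, xmax) in output
    coordinates receives the target (ymin - y, xmin - x, ymax - y, xmax - x).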
""" _, input_height, input_width = ( shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0])) output_height = tf.maximum(input_height // self._stride, 1) output_width = tf.maximum(input_width // self._stride, 1) y_grid, x_grid = tf.meshgrid( tf.range(output_height), tf.range(output_width), indexing='ij') y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32) corner_targets = [] foreground_targets = [] for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list): gt_masks = _resize_masks(gt_masks, output_height, output_width, method=ResizeMethod.NEAREST_NEIGHBOR) gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution) output_height = tf.cast(output_height, tf.float32) output_width = tf.cast(output_width, tf.float32) ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1) ymin, ymax = ymin * output_height, ymax * output_height xmin, xmax = xmin * output_width, xmax * output_width top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis] right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis] foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5, tf.float32) foreground_targets.append(foreground_target) corner_target = tf.stack([ tf.reduce_sum(top_y * gt_masks, axis=0), tf.reduce_sum(left_x * gt_masks, axis=0), tf.reduce_sum(bottom_y * gt_masks, axis=0), tf.reduce_sum(right_x * gt_masks, axis=0), ], axis=2) corner_targets.append(corner_target) return (tf.stack(corner_targets, axis=0), tf.stack(foreground_targets, axis=0)) class CenterNetTemporalOffsetTargetAssigner(object): """Wrapper to compute target tensors for the temporal offset task. This class has methods that take as input a batch of ground truth tensors (in the form of a list) and returns the targets required to train the temporal offset task. """ def __init__(self, stride): """Initializes the target assigner. Args: stride: int, the stride of the network in output pixels. """ self._stride = stride def assign_temporal_offset_targets(self, height, width, gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list=None): """Returns the temporal offset targets and their indices. For each ground truth box, this function assigns it the corresponding temporal offset to train the model. Args: height: int, height of input to the model. This is used to determine the height of the output. width: int, width of the input to the model. This is used to determine the width of the output. gt_boxes_list: A list of float tensors with shape [num_boxes, 4] representing the groundtruth detection bounding boxes for each sample in the batch. The coordinates are expected in normalized coordinates. gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2] containing the spatial offsets of objects' centers compared with the previous frame. gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing flags that indicate if an object has existed in the previous frame. gt_weights_list: A list of tensors with shape [num_boxes] corresponding to the weight of each groundtruth detection box. Returns: batch_indices: an integer tensor of shape [num_boxes, 3] holding the indices inside the predicted tensor which should be penalized. The first column indicates the index along the batch dimension and the second and third columns indicate the index along the y and x dimensions respectively. 
batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the expected y and x temporal offset of each object center in the output space. batch_weights: a float tensor of shape [num_boxes] indicating the weight of each prediction. """ if gt_weights_list is None: gt_weights_list = [None] * len(gt_boxes_list) batch_indices = [] batch_weights = [] batch_temporal_offsets = [] for i, (boxes, offsets, match_flags, weights) in enumerate(zip( gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)): boxes = box_list.BoxList(boxes) boxes = box_list_ops.to_absolute_coordinates( boxes, tf.maximum(height // self._stride, 1), tf.maximum(width // self._stride, 1)) # Get the box center coordinates. Each returned tensors have the shape of # [num_boxes] (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes() num_boxes = tf.shape(x_center) # Compute the offsets and indices of the box centers. Shape: # offsets: [num_boxes, 2] # indices: [num_boxes, 2] (_, indices) = ta_utils.compute_floor_offsets_with_indices( y_source=y_center, x_source=x_center) # Assign ones if weights are not provided. # if an object is not matched, its weight becomes zero. if weights is None: weights = tf.ones(num_boxes, dtype=tf.float32) weights *= match_flags # Shape of [num_boxes, 1] integer tensor filled with current batch index. batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32) batch_indices.append(tf.concat([batch_index, indices], axis=1)) batch_weights.append(weights) batch_temporal_offsets.append(offsets) batch_indices = tf.concat(batch_indices, axis=0) batch_weights = tf.concat(batch_weights, axis=0) batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0) return (batch_indices, batch_temporal_offsets, batch_weights) class DETRTargetAssigner(object): """Target assigner for DETR (https://arxiv.org/abs/2005.12872). Detection Transformer (DETR) matches predicted boxes to groundtruth directly to determine targets instead of matching anchors to groundtruth. Hence, the new target assigner. """ def __init__(self): """Construct Object Detection Target Assigner.""" self._similarity_calc = sim_calc.DETRSimilarity() self._matcher = hungarian_matcher.HungarianBipartiteMatcher() def batch_assign(self, pred_box_batch, gt_box_batch, pred_class_batch, gt_class_targets_batch, gt_weights_batch=None, unmatched_class_label_batch=None): """Batched assignment of classification and regression targets. Args: pred_box_batch: a tensor of shape [batch_size, num_queries, 4] representing predicted bounding boxes. gt_box_batch: a tensor of shape [batch_size, num_queries, 4] representing groundtruth bounding boxes. pred_class_batch: A list of tensors with length batch_size, where each each tensor has shape [num_queries, num_classes] to be used by certain similarity calculators. gt_class_targets_batch: a list of tensors with length batch_size, where each tensor has shape [num_gt_boxes_i, num_classes] and num_gt_boxes_i is the number of boxes in the ith boxlist of gt_box_batch. gt_weights_batch: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. unmatched_class_label_batch: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the `gt_class_targets_batch`. 
Returns: batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes, num_classes], batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes, num_classes], batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes, box_code_dimension] batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes]. """ pred_box_batch = [ box_list.BoxList(pred_box) for pred_box in tf.unstack(pred_box_batch)] gt_box_batch = [ box_list.BoxList(gt_box) for gt_box in tf.unstack(gt_box_batch)] cls_targets_list = [] cls_weights_list = [] reg_targets_list = [] reg_weights_list = [] if gt_weights_batch is None: gt_weights_batch = [None] * len(gt_class_targets_batch) if unmatched_class_label_batch is None: unmatched_class_label_batch = [None] * len(gt_class_targets_batch) pred_class_batch = tf.unstack(pred_class_batch) for (pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights, unmatched_class_label) in zip(pred_box_batch, gt_box_batch, pred_class_batch, gt_class_targets_batch, gt_weights_batch, unmatched_class_label_batch): (cls_targets, cls_weights, reg_targets, reg_weights) = self.assign(pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights, unmatched_class_label) cls_targets_list.append(cls_targets) cls_weights_list.append(cls_weights) reg_targets_list.append(reg_targets) reg_weights_list.append(reg_weights) batch_cls_targets = tf.stack(cls_targets_list) batch_cls_weights = tf.stack(cls_weights_list) batch_reg_targets = tf.stack(reg_targets_list) batch_reg_weights = tf.stack(reg_weights_list) return (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights) def assign(self, pred_boxes, gt_boxes, pred_classes, gt_labels, gt_weights=None, unmatched_class_label=None): """Assign classification and regression targets to each box_pred. For a given set of pred_boxes and groundtruth detections, match pred_boxes to gt_boxes and assign classification and regression targets to each box_pred as well as weights based on the resulting match (specifying, e.g., which pred_boxes should not contribute to training loss). pred_boxes that are not matched to anything are given a classification target of `unmatched_cls_target`. Args: pred_boxes: a BoxList representing N pred_boxes gt_boxes: a BoxList representing M groundtruth boxes pred_classes: A tensor with shape [max_num_boxes, num_classes] to be used by certain similarity calculators. gt_labels: a tensor of shape [M, num_classes] with labels for each of the ground_truth boxes. The subshape [num_classes] can be empty (corresponding to scalar inputs). When set to None, gt_labels assumes a binary problem where all ground_truth boxes get a positive label (of 1). gt_weights: a float tensor of shape [M] indicating the weight to assign to all pred_boxes match to a particular groundtruth box. The weights must be in [0., 1.]. If None, all weights are set to 1. Generally no groundtruth boxes with zero weight match to any pred_boxes as matchers are aware of groundtruth weights. Additionally, `cls_weights` and `reg_weights` are calculated using groundtruth weights as an added safety. unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent with the classification target for each anchor (and can be empty for scalar targets). This shape must thus be compatible with the groundtruth labels that are passed to the "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]). 
Returns: cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes], where the subshape [num_classes] is compatible with gt_labels which has shape [num_gt_boxes, num_classes]. cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes], representing weights for each element in cls_targets. reg_targets: a float32 tensor with shape [num_pred_boxes, box_code_dimension] reg_weights: a float32 tensor with shape [num_pred_boxes] """ if not unmatched_class_label: unmatched_class_label = tf.constant( [1] + [0] * (gt_labels.shape[1] - 1), tf.float32) if gt_weights is None: num_gt_boxes = gt_boxes.num_boxes_static() if not num_gt_boxes: num_gt_boxes = gt_boxes.num_boxes() gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32) gt_boxes.add_field(fields.BoxListFields.classes, gt_labels) pred_boxes.add_field(fields.BoxListFields.classes, pred_classes) match_quality_matrix = self._similarity_calc.compare( gt_boxes, pred_boxes) match = self._matcher.match(match_quality_matrix, valid_rows=tf.greater(gt_weights, 0)) matched_gt_boxes = match.gather_based_on_match( gt_boxes.get(), unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) matched_gt_boxlist = box_list.BoxList(matched_gt_boxes) ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes() reg_targets = tf.transpose(tf.stack([ty, tx, th, tw])) cls_targets = match.gather_based_on_match( gt_labels, unmatched_value=unmatched_class_label, ignored_value=unmatched_class_label) reg_weights = match.gather_based_on_match( gt_weights, ignored_value=0., unmatched_value=0.) cls_weights = match.gather_based_on_match( gt_weights, ignored_value=0., unmatched_value=1) # convert cls_weights from per-box_pred to per-class. class_label_shape = tf.shape(cls_targets)[1:] weights_multiple = tf.concat( [tf.constant([1]), class_label_shape], axis=0) cls_weights = tf.expand_dims(cls_weights, -1) cls_weights = tf.tile(cls_weights, weights_multiple) return (cls_targets, cls_weights, reg_targets, reg_weights)
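The last few lines of `assign` above broadcast the per-prediction weights across the class dimension so they line up with `cls_targets`. A minimal, self-contained sketch of that tiling step, using made-up weights and a hypothetical 4-box / 3-class shape purely for illustration:

```python
import tensorflow as tf

# Hypothetical per-prediction weights for 4 predicted boxes and 3 classes.
cls_weights = tf.constant([1., 1., 0.5, 0.], dtype=tf.float32)  # [num_pred_boxes]
cls_targets = tf.zeros([4, 3], dtype=tf.float32)                # [num_pred_boxes, num_classes]

# Mirror the broadcast used at the end of DETRTargetAssigner.assign:
# expand to [num_pred_boxes, 1], then tile across the class dimension.
class_label_shape = tf.shape(cls_targets)[1:]
weights_multiple = tf.concat([tf.constant([1]), class_label_shape], axis=0)
per_class_weights = tf.tile(tf.expand_dims(cls_weights, -1), weights_multiple)

print(per_class_weights.shape)  # (4, 3): every class column shares its box weight
```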
128,840
45.800218
96
py
models
models-master/research/object_detection/core/densepose_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DensePose operations. DensePose part ids are represented as tensors of shape [num_instances, num_points] and coordinates are represented as tensors of shape [num_instances, num_points, 4] where each point holds (y, x, v, u). The location of the DensePose sampled point is (y, x) in normalized coordinates. The surface coordinate (in the part coordinate frame) is (v, u). Note that dim 1 of both tensors may contain padding, since the number of sampled points per instance is not fixed. The value `num_points` represents the maximum number of sampled points for an instance in the example. """ import os import numpy as np import scipy.io import tensorflow.compat.v1 as tf from object_detection.utils import shape_utils PART_NAMES = [ b'torso_back', b'torso_front', b'right_hand', b'left_hand', b'left_foot', b'right_foot', b'right_upper_leg_back', b'left_upper_leg_back', b'right_upper_leg_front', b'left_upper_leg_front', b'right_lower_leg_back', b'left_lower_leg_back', b'right_lower_leg_front', b'left_lower_leg_front', b'left_upper_arm_back', b'right_upper_arm_back', b'left_upper_arm_front', b'right_upper_arm_front', b'left_lower_arm_back', b'right_lower_arm_back', b'left_lower_arm_front', b'right_lower_arm_front', b'right_face', b'left_face', ] def scale(dp_surface_coords, y_scale, x_scale, scope=None): """Scales DensePose coordinates in y and x dimensions. Args: dp_surface_coords: a tensor of shape [num_instances, num_points, 4], with coordinates in (y, x, v, u) format. y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] """ with tf.name_scope(scope, 'DensePoseScale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) new_keypoints = dp_surface_coords * [[[y_scale, x_scale, 1, 1]]] return new_keypoints def clip_to_window(dp_surface_coords, window, scope=None): """Clips DensePose points to a window. This op clips any input DensePose points to a window. Args: dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates in (y, x, v, u) format. window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip the keypoints. scope: name scope. Returns: new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4]. 
""" with tf.name_scope(scope, 'DensePoseClipToWindow'): y, x, v, u = tf.split(value=dp_surface_coords, num_or_size_splits=4, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) new_dp_surface_coords = tf.concat([y, x, v, u], 2) return new_dp_surface_coords def prune_outside_window(dp_num_points, dp_part_ids, dp_surface_coords, window, scope=None): """Prunes DensePose points that fall outside a given window. This function replaces points that fall outside the given window with zeros. See also clip_to_window which clips any DensePose points that fall outside the given window. Note that this operation uses dynamic shapes, and therefore is not currently suitable for TPU. Args: dp_num_points: a tensor of shape [num_instances] that indicates how many (non-padded) DensePose points there are per instance. dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose part ids. These part_ids are 0-indexed, where the first non-background part has index 0. dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates in (y, x, v, u) format. window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window outside of which the op should prune the points. scope: name scope. Returns: new_dp_num_points: a tensor of shape [num_instances] that indicates how many (non-padded) DensePose points there are per instance after pruning. new_dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose part ids. These part_ids are 0-indexed, where the first non-background part has index 0. new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates after pruning. 
""" with tf.name_scope(scope, 'DensePosePruneOutsideWindow'): y, x, _, _ = tf.unstack(dp_surface_coords, axis=-1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) num_instances, num_points = shape_utils.combined_static_and_dynamic_shape( dp_part_ids) dp_num_points_tiled = tf.tile(dp_num_points[:, tf.newaxis], multiples=[1, num_points]) range_tiled = tf.tile(tf.range(num_points)[tf.newaxis, :], multiples=[num_instances, 1]) valid_initial = range_tiled < dp_num_points_tiled valid_in_window = tf.logical_and( tf.logical_and(y >= win_y_min, y <= win_y_max), tf.logical_and(x >= win_x_min, x <= win_x_max)) valid_indices = tf.logical_and(valid_initial, valid_in_window) new_dp_num_points = tf.math.reduce_sum( tf.cast(valid_indices, tf.int32), axis=1) max_num_points = tf.math.reduce_max(new_dp_num_points) def gather_and_reshuffle(elems): dp_part_ids, dp_surface_coords, valid_indices = elems locs = tf.where(valid_indices)[:, 0] valid_part_ids = tf.gather(dp_part_ids, locs, axis=0) valid_part_ids_padded = shape_utils.pad_or_clip_nd( valid_part_ids, output_shape=[max_num_points]) valid_surface_coords = tf.gather(dp_surface_coords, locs, axis=0) valid_surface_coords_padded = shape_utils.pad_or_clip_nd( valid_surface_coords, output_shape=[max_num_points, 4]) return [valid_part_ids_padded, valid_surface_coords_padded] new_dp_part_ids, new_dp_surface_coords = ( shape_utils.static_or_dynamic_map_fn( gather_and_reshuffle, elems=[dp_part_ids, dp_surface_coords, valid_indices], dtype=[tf.int32, tf.float32], back_prop=False)) return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords def change_coordinate_frame(dp_surface_coords, window, scope=None): """Changes coordinate frame of the points to be relative to window's frame. Given a window of the form [y_min, x_min, y_max, x_max] in normalized coordinates, changes DensePose coordinates to be relative to this window. An example use case is data augmentation: where we are given groundtruth points and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each sampled point to be relative to this new window. Args: dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates in (y, x, v, u) format. window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. Returns: new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4]. """ with tf.name_scope(scope, 'DensePoseChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] new_dp_surface_coords = scale( dp_surface_coords - [window[0], window[1], 0, 0], 1.0 / win_height, 1.0 / win_width) return new_dp_surface_coords def to_normalized_coordinates(dp_surface_coords, height, width, check_range=True, scope=None): """Converts absolute DensePose coordinates to normalized in range [0, 1]. This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose absolute surface coordinates in (y, x, v, u) format. height: Height of image. width: Width of image. check_range: If True, checks if the coordinates are already normalized. scope: name scope. Returns: A tensor of shape [num_instances, num_points, 4] with normalized coordinates. 
""" with tf.name_scope(scope, 'DensePoseToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(dp_surface_coords[:, :, :2]) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(dp_surface_coords, 1.0 / height, 1.0 / width) def to_absolute_coordinates(dp_surface_coords, height, width, check_range=True, scope=None): """Converts normalized DensePose coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum coordinate value is larger than 1.01 (in which case coordinates are already absolute). Args: dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose normalized surface coordinates in (y, x, v, u) format. height: Height of image. width: Width of image. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: A tensor of shape [num_instances, num_points, 4] with absolute coordinates. """ with tf.name_scope(scope, 'DensePoseToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(dp_surface_coords[:, :, :2]) max_assert = tf.Assert(tf.greater_equal(1.01, max_val), ['maximum coordinate value is larger than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(dp_surface_coords, height, width) class DensePoseHorizontalFlip(object): """Class responsible for horizontal flipping of parts and surface coords.""" def __init__(self): """Constructor.""" path = os.path.dirname(os.path.abspath(__file__)) uv_symmetry_transforms_path = tf.resource_loader.get_path_to_datafile( os.path.join(path, '..', 'dataset_tools', 'densepose', 'UV_symmetry_transforms.mat')) tf.logging.info('Loading DensePose symmetry transforms file from {}'.format( uv_symmetry_transforms_path)) with tf.io.gfile.GFile(uv_symmetry_transforms_path, 'rb') as f: data = scipy.io.loadmat(f) # Create lookup maps which indicate how a VU coordinate changes after a # horizontal flip. uv_symmetry_map = {} for key in ('U_transforms', 'V_transforms'): uv_symmetry_map_per_part = [] for i in range(data[key].shape[1]): # The following tensor has shape [256, 256]. The raw data is stored as # uint8 values, so convert to float and scale to the range [0., 1.] data_normalized = data[key][0, i].astype(np.float32) / 255. map_per_part = tf.constant(data_normalized, dtype=tf.float32) uv_symmetry_map_per_part.append(map_per_part) uv_symmetry_map[key] = tf.reshape( tf.stack(uv_symmetry_map_per_part, axis=0), [-1]) # The following dictionary contains flattened lookup maps for the U and V # coordinates separately. The shape of each is [24 * 256 * 256]. self.uv_symmetries = uv_symmetry_map # Create a list of that maps part index to flipped part index (0-indexed). part_symmetries = [] for i, part_name in enumerate(PART_NAMES): if b'left' in part_name: part_symmetries.append(PART_NAMES.index( part_name.replace(b'left', b'right'))) elif b'right' in part_name: part_symmetries.append(PART_NAMES.index( part_name.replace(b'right', b'left'))) else: part_symmetries.append(i) self.part_symmetries = part_symmetries def flip_parts_and_coords(self, part_ids, vu): """Flips part ids and coordinates. Args: part_ids: a [num_instances, num_points] int32 tensor with pre-flipped part ids. 
These part_ids are 0-indexed, where the first non-background part has index 0. vu: a [num_instances, num_points, 2] float32 tensor with pre-flipped vu normalized coordinates. Returns: new_part_ids: a [num_instances, num_points] int32 tensor with post-flipped part ids. These part_ids are 0-indexed, where the first non-background part has index 0. new_vu: a [num_instances, num_points, 2] float32 tensor with post-flipped vu coordinates. """ num_instances, num_points = shape_utils.combined_static_and_dynamic_shape( part_ids) part_ids_flattened = tf.reshape(part_ids, [-1]) new_part_ids_flattened = tf.gather(self.part_symmetries, part_ids_flattened) new_part_ids = tf.reshape(new_part_ids_flattened, [num_instances, num_points]) # Convert VU floating point coordinates to values in [256, 256] grid. vu = tf.math.minimum(tf.math.maximum(vu, 0.0), 1.0) vu_locs = tf.cast(vu * 256., dtype=tf.int32) vu_locs_flattened = tf.reshape(vu_locs, [-1, 2]) v_locs_flattened, u_locs_flattened = tf.unstack(vu_locs_flattened, axis=1) # Convert vu_locs into lookup indices (in flattened part symmetries map). symmetry_lookup_inds = ( part_ids_flattened * 65536 + 256 * v_locs_flattened + u_locs_flattened) # New VU coordinates. v_new = tf.gather(self.uv_symmetries['V_transforms'], symmetry_lookup_inds) u_new = tf.gather(self.uv_symmetries['U_transforms'], symmetry_lookup_inds) new_vu_flattened = tf.stack([v_new, u_new], axis=1) new_vu = tf.reshape(new_vu_flattened, [num_instances, num_points, 2]) return new_part_ids, new_vu def flip_horizontal(dp_part_ids, dp_surface_coords, scope=None): """Flips the DensePose points horizontally around the flip_point. This operation flips dense pose annotations horizontally. Note that part ids and surface coordinates may or may not change as a result of the flip. Args: dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose part ids. These part_ids are 0-indexed, where the first non-background part has index 0. dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates in (y, x, v, u) normalized format. scope: name scope. Returns: new_dp_part_ids: a tensor of shape [num_instances, num_points] with DensePose part ids after flipping. new_dp_surface_coords: a tensor of shape [num_instances, num_points, 4] with DensePose surface coordinates after flipping. """ with tf.name_scope(scope, 'DensePoseFlipHorizontal'): # First flip x coordinate. y, x, vu = tf.split(dp_surface_coords, num_or_size_splits=[1, 1, 2], axis=2) xflipped = 1.0 - x # Flip part ids and surface coordinates. horizontal_flip = DensePoseHorizontalFlip() new_dp_part_ids, new_vu = horizontal_flip.flip_parts_and_coords( dp_part_ids, vu) new_dp_surface_coords = tf.concat([y, xflipped, new_vu], axis=2) return new_dp_part_ids, new_dp_surface_coords
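A short usage sketch of the pure-TensorFlow coordinate utilities above (`scale`, `clip_to_window`, `change_coordinate_frame`). It assumes the `object_detection` package is importable; the sample point values are arbitrary:

```python
import tensorflow.compat.v1 as tf

from object_detection.core import densepose_ops

# One instance with two sampled DensePose points in (y, x, v, u) format,
# where (y, x) are normalized image coordinates.
dp_surface_coords = tf.constant(
    [[[0.2, 0.3, 0.1, 0.9],
      [0.8, 0.7, 0.6, 0.4]]], dtype=tf.float32)

# Scale only the (y, x) locations; the (v, u) surface coordinates are left
# untouched by design.
scaled = densepose_ops.scale(dp_surface_coords, y_scale=2.0, x_scale=2.0)

# Clip the (y, x) locations back into a [y_min, x_min, y_max, x_max] window.
window = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32)
clipped = densepose_ops.clip_to_window(scaled, window)

# Re-express the points relative to a crop window (e.g. for data augmentation).
crop_window = tf.constant([0.1, 0.1, 0.9, 0.9], dtype=tf.float32)
in_crop_frame = densepose_ops.change_coordinate_frame(clipped, crop_window)
```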
16,315
41.824147
80
py
models
models-master/research/object_detection/core/data_parser.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data parsers.

Data parser parses input data and returns a dictionary of numpy arrays
keyed by the entries in standard_fields.py. Since the parser parses records
to numpy arrays (materialized tensors) directly, it is used to read data for
evaluation/visualization; to parse the data during training, DataDecoder
should be used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import ABCMeta
from abc import abstractmethod
import six


class DataToNumpyParser(six.with_metaclass(ABCMeta, object)):
  """Abstract interface for data parser that produces numpy arrays."""

  @abstractmethod
  def parse(self, input_data):
    """Parses input and returns a numpy array or a dictionary of numpy arrays.

    Args:
      input_data: an input data

    Returns:
      A numpy array or a dictionary of numpy arrays or None, if input cannot
      be parsed.
    """
    pass
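To make the interface concrete, here is a hypothetical minimal subclass that pulls a single float-list feature out of a `tf.train.Example` proto. The class and field names are illustrative only and not part of the library; it simply shows how a subclass can satisfy the contract, including returning `None` when the input cannot be parsed:

```python
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.core.data_parser import DataToNumpyParser


class FloatFeatureParser(DataToNumpyParser):
  """Toy parser that reads one float-list feature into a numpy array."""

  def __init__(self, field_name):
    self._field_name = field_name

  def parse(self, tf_example):
    feature_map = tf_example.features.feature
    if self._field_name not in feature_map:
      return None  # Input cannot be parsed; the contract says return None.
    return np.asarray(
        feature_map[self._field_name].float_list.value, dtype=np.float32)


example = tf.train.Example(features=tf.train.Features(feature={
    'image/object/weight': tf.train.Feature(
        float_list=tf.train.FloatList(value=[0.5, 1.0]))
}))
print(FloatFeatureParser('image/object/weight').parse(example))  # [0.5 1.0]
```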
1,645
34.782609
80
py
models
models-master/research/object_detection/core/prefetcher.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to prefetch tensors to feed into models."""
import tensorflow.compat.v1 as tf


def prefetch(tensor_dict, capacity):
  """Creates a prefetch queue for tensors.

  Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
  dequeue op that evaluates to a tensor_dict. This function is useful in
  prefetching preprocessed tensors so that the data is readily available for
  consumers.

  Example input pipeline when you don't need batching:
  ----------------------------------------------------
  key, string_tensor = slim.parallel_reader.parallel_read(...)
  tensor_dict = decoder.decode(string_tensor)
  tensor_dict = preprocessor.preprocess(tensor_dict, ...)
  prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
  tensor_dict = prefetch_queue.dequeue()
  outputs = Model(tensor_dict)
  ...
  ----------------------------------------------------

  For input pipelines with batching, refer to core/batcher.py

  Args:
    tensor_dict: a dictionary of tensors to prefetch.
    capacity: the size of the prefetch queue.

  Returns:
    a FIFO prefetcher queue
  """
  names = list(tensor_dict.keys())
  dtypes = [t.dtype for t in tensor_dict.values()]
  shapes = [t.get_shape() for t in tensor_dict.values()]
  prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
                                       shapes=shapes,
                                       names=names,
                                       name='prefetch_queue')
  enqueue_op = prefetch_queue.enqueue(tensor_dict)
  tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
      prefetch_queue, [enqueue_op]))
  tf.summary.scalar(
      'queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity),
      tf.cast(prefetch_queue.size(), dtype=tf.float32) * (1. / capacity))
  return prefetch_queue
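Since `prefetch` is built on TF1 queues and queue runners, it only makes sense inside a graph/session. A rough usage sketch under that assumption; the tensors below are dummies standing in for a real decoded and preprocessed example:

```python
import tensorflow.compat.v1 as tf

from object_detection.core import prefetcher

tf.disable_eager_execution()

with tf.Graph().as_default():
  # Stand-in for a decoded + preprocessed example.
  tensor_dict = {
      'image': tf.zeros([300, 300, 3], dtype=tf.float32),
      'groundtruth_boxes': tf.constant([[0.1, 0.1, 0.5, 0.5]], tf.float32),
  }
  prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
  dequeued = prefetch_queue.dequeue()

  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    out = sess.run(dequeued)  # a dict with the same keys as tensor_dict
    coord.request_stop()
    coord.join(threads)
```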
2,536
39.919355
80
py
models
models-master/research/object_detection/core/matcher.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Matcher interface and Match class. This module defines the Matcher interface and the Match object. The job of the matcher is to match row and column indices based on the similarity matrix and other optional parameters. Each column is matched to at most one row. There are three possibilities for the matching: 1) match: A column matches a row. 2) no_match: A column does not match any row. 3) ignore: A column that is neither 'match' nor no_match. The ignore case is regularly encountered in object detection: when an anchor has a relatively small overlap with a ground-truth box, one neither wants to consider this box a positive example (match) nor a negative example (no match). The Match class is used to store the match results and it provides simple apis to query the results. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow.compat.v1 as tf from object_detection.utils import ops class Match(object): """Class to store results from the matcher. This class is used to store the results from the matcher. It provides convenient methods to query the matching results. """ def __init__(self, match_results, use_matmul_gather=False): """Constructs a Match object. Args: match_results: Integer tensor of shape [N] with (1) match_results[i]>=0, meaning that column i is matched with row match_results[i]. (2) match_results[i]=-1, meaning that column i is not matched. (3) match_results[i]=-2, meaning that column i is ignored. use_matmul_gather: Use matrix multiplication based gather instead of standard tf.gather. (Default: False). Raises: ValueError: if match_results does not have rank 1 or is not an integer int32 scalar tensor """ if match_results.shape.ndims != 1: raise ValueError('match_results should have rank 1') if match_results.dtype != tf.int32: raise ValueError('match_results should be an int32 or int64 scalar ' 'tensor') self._match_results = match_results self._gather_op = tf.gather if use_matmul_gather: self._gather_op = ops.matmul_gather_on_zeroth_axis @property def match_results(self): """The accessor for match results. Returns: the tensor which encodes the match results. """ return self._match_results def matched_column_indices(self): """Returns column indices that match to some row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) def matched_column_indicator(self): """Returns column indices that are matched. Returns: column_indices: int32 tensor of shape [K] with column indices. 
""" return tf.greater_equal(self._match_results, 0) def num_matched_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.size(self.matched_column_indices()) def unmatched_column_indices(self): """Returns column indices that do not match any row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1))) def unmatched_column_indicator(self): """Returns column indices that are unmatched. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return tf.equal(self._match_results, -1) def num_unmatched_columns(self): """Returns number (int32 scalar tensor) of unmatched columns.""" return tf.size(self.unmatched_column_indices()) def ignored_column_indices(self): """Returns column indices that are ignored (neither Matched nor Unmatched). The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(self.ignored_column_indicator())) def ignored_column_indicator(self): """Returns boolean column indicator where True means the colum is ignored. Returns: column_indicator: boolean vector which is True for all ignored column indices. """ return tf.equal(self._match_results, -2) def num_ignored_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.size(self.ignored_column_indices()) def unmatched_or_ignored_column_indices(self): """Returns column indices that are unmatched or ignored. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results))) def matched_row_indices(self): """Returns row indices that match some column. The indices returned by this op are ordered so as to be in correspondence with the output of matched_column_indicator(). For example if self.matched_column_indicator() is [0,2], and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and column 2 was matched to row 3. Returns: row_indices: int32 tensor of shape [K] with row indices. """ return self._reshape_and_cast( self._gather_op(tf.cast(self._match_results, dtype=tf.float32), self.matched_column_indices())) def num_matched_rows(self): """Returns number (int32 scalar tensor) of matched rows.""" unique_rows, _ = tf.unique(self.matched_row_indices()) return tf.size(unique_rows) def _reshape_and_cast(self, t): return tf.cast(tf.reshape(t, [-1]), tf.int32) def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value): """Gathers elements from `input_tensor` based on match results. For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]]. For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that are ignored gathered_tensor[col] is set to ignored_value. Note that the input_tensor.shape[1:] must match with unmatched_value.shape and ignored_value.shape Args: input_tensor: Tensor to gather values from. unmatched_value: Constant tensor value for unmatched columns. ignored_value: Constant tensor value for ignored columns. Returns: gathered_tensor: A tensor containing values gathered from input_tensor. 
The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:]. """ input_tensor = tf.concat( [tf.stack([ignored_value, unmatched_value]), input_tensor], axis=0) gather_indices = tf.maximum(self.match_results + 2, 0) gathered_tensor = self._gather_op(input_tensor, gather_indices) return gathered_tensor class Matcher(six.with_metaclass(abc.ABCMeta, object)): """Abstract base class for matcher. """ def __init__(self, use_matmul_gather=False): """Constructs a Matcher. Args: use_matmul_gather: Force constructed match objects to use matrix multiplication based gather instead of standard tf.gather. (Default: False). """ self._use_matmul_gather = use_matmul_gather def match(self, similarity_matrix, valid_rows=None, scope=None): """Computes matches among row and column indices and returns the result. Computes matches among the row and column indices based on the similarity matrix and optional arguments. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. valid_rows: A boolean tensor of shape [N] indicating the rows that are valid for matching. scope: Op scope name. Defaults to 'Match' if None. Returns: A Match object with the results of matching. """ with tf.name_scope(scope, 'Match') as scope: if valid_rows is None: valid_rows = tf.ones(tf.shape(similarity_matrix)[0], dtype=tf.bool) return Match(self._match(similarity_matrix, valid_rows), self._use_matmul_gather) @abc.abstractmethod def _match(self, similarity_matrix, valid_rows): """Method to be overridden by implementations. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. valid_rows: A boolean tensor of shape [N] indicating the rows that are valid for matching. Returns: match_results: Integer tensor of shape [M]: match_results[i]>=0 means that column i is matched to row match_results[i], match_results[i]=-1 means that the column is not matched. match_results[i]=-2 means that the column is ignored (usually this happens when there is a very weak match which one neither wants as positive nor negative example). """ pass
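The >=0 / -1 / -2 encoding is easiest to see on a tiny hand-built `Match`. In practice a `Matcher` implementation constructs this object, but the made-up `match_results` below illustrate the matched, unmatched, and ignored cases (assuming the `object_detection` package is importable and eager execution is on):

```python
import tensorflow as tf

from object_detection.core.matcher import Match

# Columns 0 and 3 match rows 2 and 0; column 1 is unmatched (-1);
# column 2 is ignored (-2).
match = Match(tf.constant([2, -1, -2, 0], dtype=tf.int32))

print(match.matched_column_indices().numpy())    # [0 3]
print(match.matched_row_indices().numpy())       # [2 0]
print(match.unmatched_column_indices().numpy())  # [1]
print(match.ignored_column_indices().numpy())    # [2]

# Gather a per-row value for each column, with explicit fill values for the
# unmatched and ignored cases.
row_values = tf.constant([10., 20., 30.])
gathered = match.gather_based_on_match(
    row_values, unmatched_value=tf.constant(0.), ignored_value=tf.constant(-1.))
print(gathered.numpy())  # [30.  0. -1. 10.]
```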
9,959
35.752768
80
py
models
models-master/research/object_detection/core/standard_fields.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains classes specifying naming conventions used for object detection. Specifies: InputDataFields: standard fields used by reader/preprocessor/batcher. DetectionResultFields: standard fields returned by object detector. BoxListFields: standard field used by BoxList TfExampleFields: standard fields for tf-example data format (go/tf-example). """ class InputDataFields(object): """Names for the input tensors. Holds the standard data field names to use for identifying input tensors. This should be used by the decoder to identify keys for the returned tensor_dict containing input tensors. And it should be used by the model to identify the tensors it needs. Attributes: image: image. image_additional_channels: additional channels. original_image: image in the original input size. original_image_spatial_shape: image in the original input size. key: unique key corresponding to image. source_id: source of the original image. filename: original filename of the dataset (without common path). groundtruth_image_classes: image-level class labels. groundtruth_image_confidences: image-level class confidences. groundtruth_labeled_classes: image-level annotation that indicates the classes for which an image has been labeled. groundtruth_boxes: coordinates of the ground truth boxes in the image. groundtruth_classes: box-level class labels. groundtruth_track_ids: box-level track ID labels. groundtruth_temporal_offset: box-level temporal offsets, i.e., movement of the box center in adjacent frames. groundtruth_track_match_flags: box-level flags indicating if objects exist in the previous frame. groundtruth_confidences: box-level class confidences. The shape should be the same as the shape of groundtruth_classes. groundtruth_label_types: box-level label types (e.g. explicit negative). groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead] is the groundtruth a single object or a crowd. groundtruth_area: area of a groundtruth segment. groundtruth_difficult: is a `difficult` object groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the same class, forming a connected group, where instances are heavily occluding each other. proposal_boxes: coordinates of object proposal boxes. proposal_objectness: objectness score of each proposal. groundtruth_instance_masks: ground truth instance masks. groundtruth_instance_mask_weights: ground truth instance masks weights. groundtruth_instance_boundaries: ground truth instance boundaries. groundtruth_instance_classes: instance mask-level class labels. groundtruth_keypoints: ground truth keypoints. groundtruth_keypoint_depths: Relative depth of the keypoints. groundtruth_keypoint_depth_weights: Weights of the relative depth of the keypoints. groundtruth_keypoint_visibilities: ground truth keypoint visibilities. 
groundtruth_keypoint_weights: groundtruth weight factor for keypoints. groundtruth_label_weights: groundtruth label weights. groundtruth_verified_negative_classes: groundtruth verified negative classes groundtruth_not_exhaustive_classes: groundtruth not-exhaustively labeled classes. groundtruth_weights: groundtruth weight factor for bounding boxes. groundtruth_dp_num_points: The number of DensePose sampled points for each instance. groundtruth_dp_part_ids: Part indices for DensePose points. groundtruth_dp_surface_coords: Image locations and UV coordinates for DensePose points. num_groundtruth_boxes: number of groundtruth boxes. is_annotated: whether an image has been labeled or not. true_image_shapes: true shapes of images in the resized images, as resized images can be padded with zeros. multiclass_scores: the label score per class for each box. context_features: a flattened list of contextual features. context_feature_length: the fixed length of each feature in context_features, used for reshaping. valid_context_size: the valid context size, used in filtering the padded context features. context_features_image_id_list: the list of image source ids corresponding to the features in context_features image_format: format for the images, used to decode image_height: height of images, used to decode image_width: width of images, used to decode """ image = 'image' image_additional_channels = 'image_additional_channels' original_image = 'original_image' original_image_spatial_shape = 'original_image_spatial_shape' key = 'key' source_id = 'source_id' filename = 'filename' groundtruth_image_classes = 'groundtruth_image_classes' groundtruth_image_confidences = 'groundtruth_image_confidences' groundtruth_labeled_classes = 'groundtruth_labeled_classes' groundtruth_boxes = 'groundtruth_boxes' groundtruth_classes = 'groundtruth_classes' groundtruth_track_ids = 'groundtruth_track_ids' groundtruth_temporal_offset = 'groundtruth_temporal_offset' groundtruth_track_match_flags = 'groundtruth_track_match_flags' groundtruth_confidences = 'groundtruth_confidences' groundtruth_label_types = 'groundtruth_label_types' groundtruth_is_crowd = 'groundtruth_is_crowd' groundtruth_area = 'groundtruth_area' groundtruth_difficult = 'groundtruth_difficult' groundtruth_group_of = 'groundtruth_group_of' proposal_boxes = 'proposal_boxes' proposal_objectness = 'proposal_objectness' groundtruth_instance_masks = 'groundtruth_instance_masks' groundtruth_instance_mask_weights = 'groundtruth_instance_mask_weights' groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' groundtruth_instance_classes = 'groundtruth_instance_classes' groundtruth_keypoints = 'groundtruth_keypoints' groundtruth_keypoint_depths = 'groundtruth_keypoint_depths' groundtruth_keypoint_depth_weights = 'groundtruth_keypoint_depth_weights' groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' groundtruth_keypoint_weights = 'groundtruth_keypoint_weights' groundtruth_label_weights = 'groundtruth_label_weights' groundtruth_verified_neg_classes = 'groundtruth_verified_neg_classes' groundtruth_not_exhaustive_classes = 'groundtruth_not_exhaustive_classes' groundtruth_weights = 'groundtruth_weights' groundtruth_dp_num_points = 'groundtruth_dp_num_points' groundtruth_dp_part_ids = 'groundtruth_dp_part_ids' groundtruth_dp_surface_coords = 'groundtruth_dp_surface_coords' num_groundtruth_boxes = 'num_groundtruth_boxes' is_annotated = 'is_annotated' true_image_shape = 'true_image_shape' multiclass_scores = 'multiclass_scores' context_features = 
'context_features' context_feature_length = 'context_feature_length' valid_context_size = 'valid_context_size' context_features_image_id_list = 'context_features_image_id_list' image_timestamps = 'image_timestamps' image_format = 'image_format' image_height = 'image_height' image_width = 'image_width' class DetectionResultFields(object): """Naming conventions for storing the output of the detector. Attributes: source_id: source of the original image. key: unique key corresponding to image. detection_boxes: coordinates of the detection boxes in the image. detection_scores: detection scores for the detection boxes in the image. detection_multiclass_scores: class score distribution (including background) for detection boxes in the image including background class. detection_classes: detection-level class labels. detection_masks: contains a segmentation mask for each detection box. detection_surface_coords: contains DensePose surface coordinates for each box. detection_boundaries: contains an object boundary for each detection box. detection_keypoints: contains detection keypoints for each detection box. detection_keypoint_scores: contains detection keypoint scores. detection_keypoint_depths: contains detection keypoint depths. num_detections: number of detections in the batch. raw_detection_boxes: contains decoded detection boxes without Non-Max suppression. raw_detection_scores: contains class score logits for raw detection boxes. detection_anchor_indices: The anchor indices of the detections after NMS. detection_features: contains extracted features for each detected box after NMS. """ source_id = 'source_id' key = 'key' detection_boxes = 'detection_boxes' detection_scores = 'detection_scores' detection_multiclass_scores = 'detection_multiclass_scores' detection_features = 'detection_features' detection_classes = 'detection_classes' detection_masks = 'detection_masks' detection_surface_coords = 'detection_surface_coords' detection_boundaries = 'detection_boundaries' detection_keypoints = 'detection_keypoints' detection_keypoint_scores = 'detection_keypoint_scores' detection_keypoint_depths = 'detection_keypoint_depths' detection_embeddings = 'detection_embeddings' detection_offsets = 'detection_temporal_offsets' num_detections = 'num_detections' raw_detection_boxes = 'raw_detection_boxes' raw_detection_scores = 'raw_detection_scores' detection_anchor_indices = 'detection_anchor_indices' class BoxListFields(object): """Naming conventions for BoxLists. Attributes: boxes: bounding box coordinates. classes: classes per bounding box. scores: scores per bounding box. weights: sample weights per bounding box. objectness: objectness score per bounding box. masks: masks per bounding box. mask_weights: mask weights for each bounding box. boundaries: boundaries per bounding box. keypoints: keypoints per bounding box. keypoint_visibilities: keypoint visibilities per bounding box. keypoint_heatmaps: keypoint heatmaps per bounding box. keypoint_depths: keypoint depths per bounding box. keypoint_depth_weights: keypoint depth weights per bounding box. densepose_num_points: number of DensePose points per bounding box. densepose_part_ids: DensePose part ids per bounding box. densepose_surface_coords: DensePose surface coordinates per bounding box. is_crowd: is_crowd annotation per bounding box. temporal_offsets: temporal center offsets per bounding box. track_match_flags: match flags per bounding box. 
""" boxes = 'boxes' classes = 'classes' scores = 'scores' weights = 'weights' confidences = 'confidences' objectness = 'objectness' masks = 'masks' mask_weights = 'mask_weights' boundaries = 'boundaries' keypoints = 'keypoints' keypoint_visibilities = 'keypoint_visibilities' keypoint_heatmaps = 'keypoint_heatmaps' keypoint_depths = 'keypoint_depths' keypoint_depth_weights = 'keypoint_depth_weights' densepose_num_points = 'densepose_num_points' densepose_part_ids = 'densepose_part_ids' densepose_surface_coords = 'densepose_surface_coords' is_crowd = 'is_crowd' group_of = 'group_of' track_ids = 'track_ids' temporal_offsets = 'temporal_offsets' track_match_flags = 'track_match_flags' class PredictionFields(object): """Naming conventions for standardized prediction outputs. Attributes: feature_maps: List of feature maps for prediction. anchors: Generated anchors. raw_detection_boxes: Decoded detection boxes without NMS. raw_detection_feature_map_indices: Feature map indices from which each raw detection box was produced. """ feature_maps = 'feature_maps' anchors = 'anchors' raw_detection_boxes = 'raw_detection_boxes' raw_detection_feature_map_indices = 'raw_detection_feature_map_indices' class TfExampleFields(object): """TF-example proto feature names for object detection. Holds the standard feature names to load from an Example proto for object detection. Attributes: image_encoded: JPEG encoded string image_format: image format, e.g. "JPEG" filename: filename channels: number of channels of image colorspace: colorspace, e.g. "RGB" height: height of image in pixels, e.g. 462 width: width of image in pixels, e.g. 581 source_id: original source of the image image_class_text: image-level label in text format image_class_label: image-level label in numerical format image_class_confidence: image-level confidence of the label object_class_text: labels in text format, e.g. ["person", "cat"] object_class_label: labels in numbers, e.g. [16, 8] object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50 object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 object_view: viewpoint of object, e.g. ["frontal", "left"] object_truncated: is object truncated, e.g. [true, false] object_occluded: is object occluded, e.g. [true, false] object_difficult: is object difficult, e.g. [true, false] object_group_of: is object a single object or a group of objects object_depiction: is object a depiction object_is_crowd: [DEPRECATED, use object_group_of instead] is the object a single object or a crowd object_segment_area: the area of the segment. object_weight: a weight factor for the object's bounding box. instance_masks: instance segmentation masks. instance_boundaries: instance boundaries. instance_classes: Classes for each instance segmentation mask. detection_class_label: class label in numbers. detection_bbox_ymin: ymin coordinates of a detection box. detection_bbox_xmin: xmin coordinates of a detection box. detection_bbox_ymax: ymax coordinates of a detection box. detection_bbox_xmax: xmax coordinates of a detection box. detection_score: detection score for the class label and box. 
""" image_encoded = 'image/encoded' image_format = 'image/format' # format is reserved keyword filename = 'image/filename' channels = 'image/channels' colorspace = 'image/colorspace' height = 'image/height' width = 'image/width' source_id = 'image/source_id' image_class_text = 'image/class/text' image_class_label = 'image/class/label' image_class_confidence = 'image/class/confidence' object_class_text = 'image/object/class/text' object_class_label = 'image/object/class/label' object_bbox_ymin = 'image/object/bbox/ymin' object_bbox_xmin = 'image/object/bbox/xmin' object_bbox_ymax = 'image/object/bbox/ymax' object_bbox_xmax = 'image/object/bbox/xmax' object_view = 'image/object/view' object_truncated = 'image/object/truncated' object_occluded = 'image/object/occluded' object_difficult = 'image/object/difficult' object_group_of = 'image/object/group_of' object_depiction = 'image/object/depiction' object_is_crowd = 'image/object/is_crowd' object_segment_area = 'image/object/segment/area' object_weight = 'image/object/weight' instance_masks = 'image/segmentation/object' instance_boundaries = 'image/boundaries/object' instance_classes = 'image/segmentation/object/class' detection_class_label = 'image/detection/label' detection_bbox_ymin = 'image/detection/bbox/ymin' detection_bbox_xmin = 'image/detection/bbox/xmin' detection_bbox_ymax = 'image/detection/bbox/ymax' detection_bbox_xmax = 'image/detection/bbox/xmax' detection_score = 'image/detection/score' # Sequence fields for SequenceExample inputs. # All others are considered context fields. SEQUENCE_FIELDS = [InputDataFields.image, InputDataFields.source_id, InputDataFields.groundtruth_boxes, InputDataFields.num_groundtruth_boxes, InputDataFields.groundtruth_classes, InputDataFields.groundtruth_weights, InputDataFields.source_id, InputDataFields.is_annotated]
16,699
45.778711
80
py
models
models-master/research/object_detection/core/target_assigner_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.target_assigner.""" from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.core import box_list from object_detection.core import region_similarity_calculator from object_detection.core import standard_fields as fields from object_detection.core import target_assigner as targetassigner from object_detection.matchers import argmax_matcher from object_detection.utils import np_box_ops from object_detection.utils import test_case from object_detection.utils import tf_version class TargetAssignerTest(test_case.TestCase): def test_assign_agnostic(self): def graph_fn(anchor_means, groundtruth_box_corners): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_class_agnostic_with_ignored_matches(self): # Note: test is very similar to above. The third box matched with an IOU # of 0.35, which is between the matched and unmatched threshold. This means # That like above the expected classification targets are [1, 1, 0]. # Unlike above, the third target is ignored and therefore expected # classification weights are [1, 1, 0]. 
def graph_fn(anchor_means, groundtruth_box_corners): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.3) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [0]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_agnostic_with_keypoints(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_keypoints): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, groundtruth_keypoints) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32) groundtruth_keypoints = np.array( [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, -5], [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, -11, -7], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_keypoints]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) 
self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self): # Note: test is very similar to above. The third box matched with an IOU # of 0.35, which is between the matched and unmatched threshold. This means # That like above the expected classification targets are [1, 1, 0]. # Unlike above, the third target is ignored and therefore expected # classification weights are [1, 1, 0]. def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_keypoints): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, groundtruth_keypoints) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32) groundtruth_keypoints = np.array( [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, -5], [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, -11, -7], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_keypoints]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_multiclass(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, groundtruth_labels, unmatched_class_label=unmatched_class_label) (cls_targets, cls_weights, reg_targets, 
reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0], [.75, 0, 1.0, .25]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [.75, 0, .95, .27]], dtype=np.float32) groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]] exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0], [0, 0, -.5, .2]] exp_reg_weights = [1, 1, 0, 1] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_multiclass_with_groundtruth_weights(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels, groundtruth_weights): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, groundtruth_labels, unmatched_class_label=unmatched_class_label, groundtruth_weights=groundtruth_weights) (_, cls_weights, _, reg_weights, _) = result return (cls_weights, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0], [.75, 0, 1.0, .25]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [.75, 0, .95, .27]], dtype=np.float32) groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32) groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32) # background class gets weight of 1. exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3], [0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]] exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0. 
(cls_weights_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_box_corners, groundtruth_labels, groundtruth_weights ]) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_assign_multidimensional_class_targets(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, groundtruth_labels, unmatched_class_label=unmatched_class_label) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0], [.75, 0, 1.0, .25]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [.75, 0, .95, .27]], dtype=np.float32) groundtruth_labels = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]], [[0, 1], [1, .5]]], np.float32) exp_cls_targets = [[[0, 1], [1, 0]], [[1, 0], [0, 1]], [[0, 0], [0, 0]], [[0, 1], [1, .5]]] exp_cls_weights = [[[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]], [[1, 1], [1, 1]]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0], [0, 0, -.5, .2]] exp_reg_weights = [1, 1, 0, 1] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_assign_empty_groundtruth(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) unmatched_class_label = tf.constant([0, 0, 0], tf.float32) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, groundtruth_labels, unmatched_class_label=unmatched_class_label) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) groundtruth_labels = np.zeros((0, 3), dtype=np.float32) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0], [.75, 0, 1.0, .25]], dtype=np.float32) exp_cls_targets = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]] exp_cls_weights = [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 
1]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] exp_reg_weights = [0, 0, 0, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self): similarity_calc = region_similarity_calculator.NegSqDistSimilarity() matcher = argmax_matcher.ArgMaxMatcher(0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder() unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0], [.75, 0, 1.0, .25]]) priors = box_list.BoxList(prior_means) box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.8], [0.5, 0.5, 0.9, 0.9], [.75, 0, .95, .27]] boxes = box_list.BoxList(tf.constant(box_corners)) groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0, 0]], tf.float32) with self.assertRaisesRegexp(ValueError, 'Unequal shapes'): target_assigner.assign( priors, boxes, groundtruth_labels, unmatched_class_label=unmatched_class_label) def test_raises_error_on_invalid_groundtruth_labels(self): similarity_calc = region_similarity_calculator.NegSqDistSimilarity() matcher = argmax_matcher.ArgMaxMatcher(0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0) unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]]) priors = box_list.BoxList(prior_means) box_corners = [[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9], [.75, 0, .95, .27]] boxes = box_list.BoxList(tf.constant(box_corners)) groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32) with self.assertRaises(ValueError): target_assigner.assign( priors, boxes, groundtruth_labels, unmatched_class_label=unmatched_class_label) class BatchTargetAssignerTest(test_case.TestCase): def _get_target_assigner(self): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) def test_batch_assign_targets(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_targets = [None, None] anchors_boxlist = box_list.BoxList(anchor_means) agnostic_target_assigner = self._get_target_assigner() (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_targets( agnostic_target_assigner, anchors_boxlist, gt_box_batch, gt_class_targets) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) 
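    # Note on the expected regression targets in the batch tests below:
    # MeanStddevBoxCoder(stddev=0.1) encodes a matched anchor as
    # (groundtruth_corners - anchor_corners) / stddev. For example, the
    # anchor [0, 0, .25, .25] matched to groundtruth [0., 0., 0.2, 0.2]
    # encodes to [0, 0, -0.05, -0.05] / 0.1 = [0, 0, -0.5, -0.5], the first
    # row of exp_reg_targets; unmatched anchors keep all-zero targets and
    # zero regression weight.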
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[1], [0], [0], [0]], [[0], [1], [1], [0]]] exp_cls_weights = [[[1], [1], [1], [1]], [[1], [1], [1], [1]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0.15789001, -0.01500003, 0.57889998, -1.15799987], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_multiclass_targets(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_targets = [class_targets1, class_targets2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() num_classes = 3 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_targets( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_targets, unmatched_class_label) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) class_targets2 = np.array([[0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0.15789001, -0.01500003, 0.57889998, -1.15799987], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2 ]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_multiclass_targets_with_padded_groundtruth(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2, groundtruth_weights1, groundtruth_weights2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_targets = [class_targets1, class_targets2] gt_weights = [groundtruth_weights1, 
groundtruth_weights2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() num_classes = 3 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_targets( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_targets, unmatched_class_label, gt_weights) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], [0., 0., 0., 0.]], dtype=np.float32) groundtruth_weights1 = np.array([1, 0], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842], [0, 0, 0, 0]], dtype=np.float32) groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) class_targets2 = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 0]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0.15789001, -0.01500003, 0.57889998, -1.15799987], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2, groundtruth_weights1, groundtruth_weights2 ]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_multidimensional_targets(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_targets = [class_targets1, class_targets2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() target_dimensions = (2, 3) unmatched_class_label = tf.constant(np.zeros(target_dimensions), tf.float32) (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_targets( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_targets, unmatched_class_label) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) class_targets1 = np.array([[[0, 1, 1], [1, 1, 0]]], dtype=np.float32) class_targets2 = np.array([[[0, 1, 1], [1, 1, 0]], [[0, 0, 1], [0, 0, 1]]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[[0., 1., 1.], [1., 1., 0.]], [[0., 0., 0.], [0., 0., 0.]], [[0., 0., 0.], [0., 0., 0.]], [[0., 0., 0.], [0., 0., 0.]]], [[[0., 0., 0.], [0., 0., 0.]], [[0., 1., 1.], [1., 1., 0.]], [[0., 0., 1.], [0., 0., 
1.]], [[0., 0., 0.], [0., 0., 0.]]]] exp_cls_weights = [[[[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]]], [[[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0.15789001, -0.01500003, 0.57889998, -1.15799987], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2 ]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_empty_groundtruth(self): def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets): groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) gt_box_batch = [groundtruth_boxlist] gt_class_targets_batch = [gt_class_targets] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() num_classes = 3 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_targets( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_targets_batch, unmatched_class_label) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1]], dtype=np.float32) exp_cls_targets = [[[1, 0, 0, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[1, 1, 1, 1], [1, 1, 1, 1]]] exp_reg_targets = [[[0, 0, 0, 0], [0, 0, 0, 0]]] exp_reg_weights = [[0, 0]] num_classes = 3 pad = 1 gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32) (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) class BatchGetTargetsTest(test_case.TestCase): def test_scalar_targets(self): batch_match = np.array([[1, 0, 1], [-2, -1, 1]], dtype=np.int32) groundtruth_tensors_list = np.array([[11, 12], [13, 14]], dtype=np.int32) groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], dtype=np.float32) unmatched_value = np.array(99, dtype=np.int32) unmatched_weight = np.array(0.0, dtype=np.float32) def graph_fn(batch_match, groundtruth_tensors_list, groundtruth_weights_list, unmatched_value, unmatched_weight): targets, weights = targetassigner.batch_get_targets( batch_match, tf.unstack(groundtruth_tensors_list), tf.unstack(groundtruth_weights_list), unmatched_value, unmatched_weight) return (targets, weights) (targets_np, weights_np) = self.execute(graph_fn, [ batch_match, groundtruth_tensors_list, groundtruth_weights_list, unmatched_value, unmatched_weight ]) self.assertAllEqual([[12, 11, 12], [99, 99, 14]], targets_np) self.assertAllClose([[1.0, 1.0, 1.0], [0.0, 0.0, 0.5]], weights_np) def test_1d_targets(self): batch_match = np.array([[1, 0, 1], [-2, -1, 1]], dtype=np.int32) groundtruth_tensors_list = 
np.array([[[11, 12], [12, 13]], [[13, 14], [14, 15]]], dtype=np.float32) groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]], dtype=np.float32) unmatched_value = np.array([99, 99], dtype=np.float32) unmatched_weight = np.array(0.0, dtype=np.float32) def graph_fn(batch_match, groundtruth_tensors_list, groundtruth_weights_list, unmatched_value, unmatched_weight): targets, weights = targetassigner.batch_get_targets( batch_match, tf.unstack(groundtruth_tensors_list), tf.unstack(groundtruth_weights_list), unmatched_value, unmatched_weight) return (targets, weights) (targets_np, weights_np) = self.execute(graph_fn, [ batch_match, groundtruth_tensors_list, groundtruth_weights_list, unmatched_value, unmatched_weight ]) self.assertAllClose([[[12, 13], [11, 12], [12, 13]], [[99, 99], [99, 99], [14, 15]]], targets_np) self.assertAllClose([[1.0, 1.0, 1.0], [0.0, 0.0, 0.5]], weights_np) class BatchTargetAssignConfidencesTest(test_case.TestCase): def _get_target_assigner(self): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder) def test_batch_assign_empty_groundtruth(self): def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences): groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) gt_box_batch = [groundtruth_boxlist] gt_class_confidences_batch = [gt_class_confidences] anchors_boxlist = box_list.BoxList(anchor_means) num_classes = 3 implicit_class_weight = 0.5 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) multiclass_target_assigner = self._get_target_assigner() (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_confidences( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_confidences_batch, unmatched_class_label=unmatched_class_label, include_background_class=True, implicit_class_weight=implicit_class_weight) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1]], dtype=np.float32) num_classes = 3 pad = 1 gt_class_confidences = np.zeros((0, num_classes + pad), dtype=np.float32) exp_cls_targets = [[[1, 0, 0, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5]]] exp_reg_targets = [[[0, 0, 0, 0], [0, 0, 0, 0]]] exp_reg_weights = [[0, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners, gt_class_confidences]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_confidences_agnostic(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_confidences_batch = [None, None] anchors_boxlist = box_list.BoxList(anchor_means) agnostic_target_assigner = self._get_target_assigner() implicit_class_weight = 0.5 (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_confidences( agnostic_target_assigner, anchors_boxlist, gt_box_batch, 
gt_class_confidences_batch, include_background_class=False, implicit_class_weight=implicit_class_weight) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[1], [0], [0], [0]], [[0], [1], [1], [0]]] exp_cls_weights = [[[1], [0.5], [0.5], [0.5]], [[0.5], [1], [1], [0.5]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0.15789001, -0.01500003, 0.57889998, -1.15799987], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_confidences_multiclass(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_confidences_batch = [class_targets1, class_targets2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() num_classes = 3 implicit_class_weight = 0.5 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_confidences( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_confidences_batch, unmatched_class_label=unmatched_class_label, include_background_class=True, implicit_class_weight=implicit_class_weight) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) class_targets2 = np.array([[0, 0, 0, 1], [0, 0, -1, 0]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[1, 1, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5]], [[0.5, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 1], [0.5, 0.5, 1, 0.5], [0.5, 0.5, 0.5, 0.5]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 0, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2 ]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def 
test_batch_assign_confidences_multiclass_with_padded_groundtruth(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2, groundtruth_weights1, groundtruth_weights2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_confidences_batch = [class_targets1, class_targets2] gt_weights = [groundtruth_weights1, groundtruth_weights2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() num_classes = 3 unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32) implicit_class_weight = 0.5 (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_confidences( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_confidences_batch, gt_weights, unmatched_class_label=unmatched_class_label, include_background_class=True, implicit_class_weight=implicit_class_weight) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2], [0., 0., 0., 0.]], dtype=np.float32) groundtruth_weights1 = np.array([1, 0], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842], [0, 0, 0, 0]], dtype=np.float32) groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32) class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32) class_targets2 = np.array([[0, 0, 0, 1], [0, 0, -1, 0], [0, 0, 0, 0]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) exp_cls_targets = [[[0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [1, 0, 0, 0]]] exp_cls_weights = [[[1, 1, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5]], [[0.5, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 1], [0.5, 0.5, 1, 0.5], [0.5, 0.5, 0.5, 0.5]]] exp_reg_targets = [[[0, 0, -0.5, -0.5], [0, 0, 0, 0], [0, 0, 0, 0,], [0, 0, 0, 0,],], [[0, 0, 0, 0,], [0, 0.01231521, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] exp_reg_weights = [[1, 0, 0, 0], [0, 1, 0, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2, groundtruth_weights1, groundtruth_weights2 ]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) def test_batch_assign_confidences_multidimensional(self): def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2): box_list1 = box_list.BoxList(groundtruth_boxlist1) box_list2 = box_list.BoxList(groundtruth_boxlist2) gt_box_batch = [box_list1, box_list2] gt_class_confidences_batch = [class_targets1, class_targets2] anchors_boxlist = box_list.BoxList(anchor_means) multiclass_target_assigner = self._get_target_assigner() target_dimensions = (2, 3) unmatched_class_label = tf.constant(np.zeros(target_dimensions), tf.float32) implicit_class_weight = 0.5 (cls_targets, cls_weights, reg_targets, reg_weights, _) = targetassigner.batch_assign_confidences( multiclass_target_assigner, anchors_boxlist, gt_box_batch, gt_class_confidences_batch, unmatched_class_label=unmatched_class_label, include_background_class=True, 
implicit_class_weight=implicit_class_weight) return (cls_targets, cls_weights, reg_targets, reg_weights) groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32) groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1], [0.015789, 0.0985, 0.55789, 0.3842]], dtype=np.float32) class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32) class_targets2 = np.array([[0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.float32) class_targets1 = np.array([[[0, 1, 1], [1, 1, 0]]], dtype=np.float32) class_targets2 = np.array([[[0, 1, 1], [1, 1, 0]], [[0, 0, 1], [0, 0, 1]]], dtype=np.float32) anchor_means = np.array([[0, 0, .25, .25], [0, .25, 1, 1], [0, .1, .5, .5], [.75, .75, 1, 1]], dtype=np.float32) with self.assertRaises(ValueError): _, _, _, _ = self.execute(graph_fn, [ anchor_means, groundtruth_boxlist1, groundtruth_boxlist2, class_targets1, class_targets2 ]) class CreateTargetAssignerTest(test_case.TestCase): def test_create_target_assigner(self): """Tests that named constructor gives working target assigners. TODO(rathodv): Make this test more general. """ corners = [[0.0, 0.0, 1.0, 1.0]] groundtruth = box_list.BoxList(tf.constant(corners)) priors = box_list.BoxList(tf.constant(corners)) if tf_version.is_tf1(): multibox_ta = (targetassigner .create_target_assigner('Multibox', stage='proposal')) multibox_ta.assign(priors, groundtruth) # No tests on output, as that may vary arbitrarily as new target assigners # are added. As long as it is constructed correctly and runs without errors, # tests on the individual assigners cover correctness of the assignments. anchors = box_list.BoxList(tf.constant(corners)) faster_rcnn_proposals_ta = (targetassigner .create_target_assigner('FasterRCNN', stage='proposal')) faster_rcnn_proposals_ta.assign(anchors, groundtruth) fast_rcnn_ta = (targetassigner .create_target_assigner('FastRCNN')) fast_rcnn_ta.assign(anchors, groundtruth) faster_rcnn_detection_ta = (targetassigner .create_target_assigner('FasterRCNN', stage='detection')) faster_rcnn_detection_ta.assign(anchors, groundtruth) with self.assertRaises(ValueError): targetassigner.create_target_assigner('InvalidDetector', stage='invalid_stage') def _array_argmax(array): return np.unravel_index(np.argmax(array), array.shape) class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase, parameterized.TestCase): def setUp(self): super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp() self._box_center = [0.0, 0.0, 1.0, 1.0] self._box_center_small = [0.25, 0.25, 0.75, 0.75] self._box_lower_left = [0.5, 0.0, 1.0, 0.5] self._box_center_offset = [0.1, 0.05, 1.0, 1.0] self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] def test_center_location(self): """Test that the centers are at the correct location.""" def graph_fn(): box_batch = [tf.constant([self._box_center, self._box_lower_left])] classes = [ tf.one_hot([0, 1], depth=4), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 10, 10, 0]) self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1])) self.assertAlmostEqual(1.0, targets[0, 15, 5, 1]) @parameterized.parameters( {'keypoint_weights_for_center': [1.0, 1.0, 1.0, 1.0]}, {'keypoint_weights_for_center': [0.0, 0.0, 1.0, 1.0]}, ) def test_center_location_by_keypoints(self, keypoint_weights_for_center): """Test that the centers are at the correct 
location.""" kpts_y = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.0, 0.0, 0.0, 0.0]] kpts_x = [[0.5, 0.6, 0.7, 0.8], [0.1, 0.2, 0.3, 0.4], [0.0, 0.0, 0.0, 0.0]] gt_keypoints_list = [ tf.stack([tf.constant(kpts_y), tf.constant(kpts_x)], axis=2) ] kpts_weight = [[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0]] gt_keypoints_weights_list = [tf.constant(kpts_weight)] gt_classes_list = [ tf.one_hot([0, 0, 0], depth=1), ] gt_weights_list = [tf.constant([1.0, 1.0, 0.0])] def graph_fn(): assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, keypoint_class_id=0, keypoint_indices=[0, 1, 2, 3], keypoint_weights_for_center=keypoint_weights_for_center) targets = assigner.assign_center_targets_from_keypoints( 80, 80, gt_classes_list=gt_classes_list, gt_keypoints_list=gt_keypoints_list, gt_weights_list=gt_weights_list, gt_keypoints_weights_list=gt_keypoints_weights_list) return targets targets = self.execute(graph_fn, []) if sum(keypoint_weights_for_center) == 4.0: # There should be two peaks at location (5, 13), and (12, 4). # (5, 13) = ((0.1 + 0.2 + 0.3 + 0.4) / 4 * 80 / 4, # (0.5 + 0.6 + 0.7 + 0.8) / 4 * 80 / 4) # (12, 4) = ((0.5 + 0.7) / 2 * 80 / 4, # (0.1 + 0.3) / 2 * 80 / 4) self.assertEqual((5, 13), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 5, 13, 0]) self.assertEqual((1, 20, 20, 1), targets.shape) targets[0, 5, 13, 0] = 0.0 self.assertEqual((12, 4), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 12, 4, 0]) else: # There should be two peaks at location (5, 13), and (12, 4). # (7, 15) = ((0.3 + 0.4) / 2 * 80 / 4, # (0.7 + 0.8) / 2 * 80 / 4) # (14, 6) = (0.7 * 80 / 4, 0.3 * 80 / 4) self.assertEqual((7, 15), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 7, 15, 0]) self.assertEqual((1, 20, 20, 1), targets.shape) targets[0, 7, 15, 0] = 0.0 self.assertEqual((14, 6), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 14, 6, 0]) def test_center_batch_shape(self): """Test that the shape of the target for a batch is correct.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center]), tf.constant([self._box_center_small]), ] classes = [ tf.one_hot([0, 1], depth=4), tf.one_hot([2], depth=4), tf.one_hot([3], depth=4), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) self.assertEqual((3, 20, 20, 4), targets.shape) def test_center_overlap_maximum(self): """Test that when boxes overlap we, are computing the maximum.""" def graph_fn(): box_batch = [ tf.constant([ self._box_center, self._box_center_offset, self._box_center, self._box_center_offset ]) ] classes = [ tf.one_hot([0, 0, 1, 2], depth=4), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) class0_targets = targets[0, :, :, 0] class1_targets = targets[0, :, :, 1] class2_targets = targets[0, :, :, 2] np.testing.assert_allclose(class0_targets, np.maximum(class1_targets, class2_targets)) def test_size_blur(self): """Test that the heatmap of a larger box is more blurred.""" def graph_fn(): box_batch = [tf.constant([self._box_center, self._box_center_small])] classes = [ tf.one_hot([0, 1], depth=4), ] assigner = 
targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) self.assertGreater( np.count_nonzero(targets[:, :, :, 0]), np.count_nonzero(targets[:, :, :, 1])) def test_weights(self): """Test that the weights correctly ignore ground truth.""" def graph1_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center]), tf.constant([self._box_center_small]), ] classes = [ tf.one_hot([0, 1], depth=4), tf.one_hot([2], depth=4), tf.one_hot([3], depth=4), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes) return targets targets = self.execute(graph1_fn, []) self.assertAlmostEqual(1.0, targets[0, :, :, 0].max()) self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max()) self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) def graph2_fn(): weights = [ tf.constant([0., 1.]), tf.constant([1.]), tf.constant([1.]), ] box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center]), tf.constant([self._box_center_small]), ] classes = [ tf.one_hot([0, 1], depth=4), tf.one_hot([2], depth=4), tf.one_hot([3], depth=4), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4) targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch, classes, weights) return targets targets = self.execute(graph2_fn, []) self.assertAlmostEqual(1.0, targets[0, :, :, 1].max()) self.assertAlmostEqual(1.0, targets[1, :, :, 2].max()) self.assertAlmostEqual(1.0, targets[2, :, :, 3].max()) self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max()) self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max()) self.assertAlmostEqual(0.0, targets[2, :, :, :3].max()) def test_low_overlap(self): def graph1_fn(): box_batch = [tf.constant([self._box_center])] classes = [ tf.one_hot([0], depth=2), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, min_overlap=0.1) targets_low_overlap = assigner.assign_center_targets_from_boxes( 80, 80, box_batch, classes) return targets_low_overlap targets_low_overlap = self.execute(graph1_fn, []) self.assertLess(1, np.count_nonzero(targets_low_overlap)) def graph2_fn(): box_batch = [tf.constant([self._box_center])] classes = [ tf.one_hot([0], depth=2), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, min_overlap=0.6) targets_medium_overlap = assigner.assign_center_targets_from_boxes( 80, 80, box_batch, classes) return targets_medium_overlap targets_medium_overlap = self.execute(graph2_fn, []) self.assertLess(1, np.count_nonzero(targets_medium_overlap)) def graph3_fn(): box_batch = [tf.constant([self._box_center])] classes = [ tf.one_hot([0], depth=2), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, min_overlap=0.99) targets_high_overlap = assigner.assign_center_targets_from_boxes( 80, 80, box_batch, classes) return targets_high_overlap targets_high_overlap = self.execute(graph3_fn, []) self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap)) self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap)) def test_empty_box_list(self): """Test that an empty box list gives an all 
0 heatmap.""" def graph_fn(): box_batch = [ tf.zeros((0, 4), dtype=tf.float32), ] classes = [ tf.zeros((0, 5), dtype=tf.float32), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, min_overlap=0.1) targets = assigner.assign_center_targets_from_boxes( 80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) np.testing.assert_allclose(targets, 0.) class CenterNetBoxTargetAssignerTest(test_case.TestCase): def setUp(self): super(CenterNetBoxTargetAssignerTest, self).setUp() self._box_center = [0.0, 0.0, 1.0, 1.0] self._box_center_small = [0.25, 0.25, 0.75, 0.75] self._box_lower_left = [0.5, 0.0, 1.0, 0.5] self._box_center_offset = [0.1, 0.05, 1.0, 1.0] self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] def test_max_distance_for_overlap(self): """Test that the distance ensures the IoU with random boxes.""" # TODO(vighneshb) remove this after the `_smallest_positive_root` # function if fixed. self.skipTest(('Skipping test because we are using an incorrect version of' 'the `max_distance_for_overlap` function to reproduce' ' results.')) rng = np.random.RandomState(0) n_samples = 100 width = rng.uniform(1, 100, size=n_samples) height = rng.uniform(1, 100, size=n_samples) min_iou = rng.uniform(0.1, 1.0, size=n_samples) def graph_fn(): max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou) return max_dist max_dist = self.execute(graph_fn, []) xmin1 = np.zeros(n_samples) ymin1 = np.zeros(n_samples) xmax1 = np.zeros(n_samples) + width ymax1 = np.zeros(n_samples) + height xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi)) ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi)) xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi)) ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi)) boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T iou = np.diag(np_box_ops.iou(boxes1, boxes2)) self.assertTrue(np.all(iou >= min_iou)) def test_max_distance_for_overlap_centernet(self): """Test the version of the function used in the CenterNet paper.""" def graph_fn(): distance = targetassigner.max_distance_for_overlap(10, 5, 0.5) return distance distance = self.execute(graph_fn, []) self.assertAlmostEqual(2.807764064, distance) def test_assign_size_and_offset_targets(self): """Test the assign_size_and_offset_targets function.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center_offset]), tf.constant([self._box_center_small, self._box_odd_coordinates]), ] assigner = targetassigner.CenterNetBoxTargetAssigner(4) indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets( 80, 80, box_batch) return indices, hw, yx_offset, weights indices, hw, yx_offset, weights = self.execute(graph_fn, []) self.assertEqual(indices.shape, (5, 3)) self.assertEqual(hw.shape, (5, 2)) self.assertEqual(yx_offset.shape, (5, 2)) self.assertEqual(weights.shape, (5,)) np.testing.assert_array_equal( indices, [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) np.testing.assert_array_equal( hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]]) np.testing.assert_array_equal( yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]]) np.testing.assert_array_equal(weights, 1) def test_assign_size_and_offset_targets_weights(self): """Test the assign_size_and_offset_targets function with box weights.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_lower_left, 
self._box_center_small]), tf.constant([self._box_center_small, self._box_odd_coordinates]), ] cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4) weights_batch = [ tf.constant([0.0, 1.0]), tf.constant([1.0, 1.0]), tf.constant([0.0, 0.0]) ] indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets( 80, 80, box_batch, weights_batch) return indices, hw, yx_offset, weights indices, hw, yx_offset, weights = self.execute(graph_fn, []) self.assertEqual(indices.shape, (6, 3)) self.assertEqual(hw.shape, (6, 2)) self.assertEqual(yx_offset.shape, (6, 2)) self.assertEqual(weights.shape, (6,)) np.testing.assert_array_equal(indices, [[0, 10, 10], [0, 15, 5], [1, 15, 5], [1, 10, 10], [2, 10, 10], [2, 7, 11]]) np.testing.assert_array_equal( hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]]) np.testing.assert_array_equal( yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]]) np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0]) def test_get_batch_predictions_from_indices(self): """Test the get_batch_predictions_from_indices function. This test verifies that the indices returned by assign_size_and_offset_targets function work as expected with a predicted tensor. """ def graph_fn(): pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000 pred_array[0, 20, 10] = [1, 2] pred_array[0, 30, 5] = [3, 4] pred_array[1, 20, 10] = [5, 6] pred_array[1, 14, 11] = [7, 8] pred_tensor = tf.constant(pred_array) indices = tf.constant([ [0, 20, 10], [0, 30, 5], [1, 20, 10], [1, 14, 11] ], dtype=tf.int32) preds = targetassigner.get_batch_predictions_from_indices( pred_tensor, indices) return preds preds = self.execute(graph_fn, []) np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]]) def test_get_batch_predictions_from_indices_with_class(self): """Test the get_batch_predictions_from_indices function with class axis. This test verifies that the indices returned by assign_size_and_offset_targets function work as expected with a predicted tensor. 
""" def graph_fn(): pred_array = np.ones((2, 40, 20, 5, 2), dtype=np.int32) * -1000 pred_array[0, 20, 10, 0] = [1, 2] pred_array[0, 30, 5, 2] = [3, 4] pred_array[1, 20, 10, 1] = [5, 6] pred_array[1, 14, 11, 4] = [7, 8] pred_tensor = tf.constant(pred_array) indices = tf.constant([ [0, 20, 10, 0], [0, 30, 5, 2], [1, 20, 10, 1], [1, 14, 11, 4] ], dtype=tf.int32) preds = targetassigner.get_batch_predictions_from_indices( pred_tensor, indices) return preds preds = self.execute(graph_fn, []) np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]]) class CenterNetIOUTargetAssignerTest(test_case.TestCase): def setUp(self): super(CenterNetIOUTargetAssignerTest, self).setUp() self._box_center = [0.0, 0.0, 1.0, 1.0] self._box_center_small = [0.25, 0.25, 0.75, 0.75] self._box_lower_left = [0.5, 0.0, 1.0, 0.5] self._box_center_offset = [0.1, 0.05, 1.0, 1.0] self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] def test_center_location(self): """Test that the centers are at the correct location.""" def graph_fn(): box_batch = [tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_lower_left, self._box_center])] classes = [ tf.one_hot([0, 1], depth=4), tf.one_hot([2, 2], depth=4) ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 4, box_heatmap_type='iou') targets = assigner.assign_center_targets_from_boxes( 80, 80, box_batch, classes) return targets targets = self.execute(graph_fn, []) self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0])) self.assertAlmostEqual(1.0, targets[0, 10, 10, 0]) self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1])) self.assertAlmostEqual(1.0, targets[0, 15, 5, 1]) self.assertAlmostEqual(1.0, targets[1, 15, 5, 2]) self.assertAlmostEqual(1.0, targets[1, 10, 10, 2]) self.assertAlmostEqual(0.0, targets[1, 0, 19, 1]) def test_exponent(self): """Test that the centers are at the correct location.""" def graph_fn(): box_batch = [tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_lower_left, self._box_center])] classes = [ tf.one_hot([0], depth=2), ] assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 1, box_heatmap_type='iou') targets = assigner.assign_center_targets_from_boxes( 4, 4, box_batch, classes) assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner( 1, box_heatmap_type='iou', heatmap_exponent=0.5) targets_pow = assigner.assign_center_targets_from_boxes( 4, 4, box_batch, classes) return targets, targets_pow targets, targets_pow = self.execute(graph_fn, []) self.assertLess(targets[0, 2, 3, 0], 1.0) self.assertLess(targets_pow[0, 2, 3, 0], 1.0) self.assertAlmostEqual(targets[0, 2, 3, 0], targets_pow[0, 2, 3, 0] ** 2) class CenterNetKeypointTargetAssignerTest(test_case.TestCase): def test_keypoint_heatmap_targets(self): def graph_fn(): gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 1.0], [0.4, 0.1, 0.4, 0.2, 0.1], [float('nan'), 0.1, 0.5, 0.7, 0.6]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] gt_boxes_list = [ tf.constant( np.array([[0.0, 0.0, 0.3, 0.3], [0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]]), dtype=tf.float32) ] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2]) (targets, num_instances_batch, valid_mask) = cn_assigner.assign_keypoint_heatmap_targets( 120, 80, gt_keypoints_list, 
gt_classes_list, gt_boxes_list=gt_boxes_list) return targets, num_instances_batch, valid_mask targets, num_instances_batch, valid_mask = self.execute(graph_fn, []) # keypoint (0.5, 0.5) is selected. The peak is expected to appear at the # center of the image. self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1])) self.assertAlmostEqual(1.0, targets[0, 15, 10, 1]) # No peak for the first class since NaN is selected. self.assertAlmostEqual(0.0, targets[0, 15, 10, 0]) # Verify the output heatmap shape. self.assertAllEqual([1, 30, 20, 2], targets.shape) # Verify the number of instances is correct. np.testing.assert_array_almost_equal([[0, 1]], num_instances_batch) self.assertAllEqual([1, 30, 20, 2], valid_mask.shape) # When calling the function, we specify the class id to be 1 (1th and 3rd) # instance and the keypoint indices to be [0, 2], meaning that the 1st # instance is the target class with no valid keypoints in it. As a result, # the region of both keypoint types of the 1st instance boxing box should be # blacked out (0.0, 0.0, 0.5, 0.5), transfering to (0, 0, 15, 10) in # absolute output space. self.assertAlmostEqual(np.sum(valid_mask[:, 0:15, 0:10, 0:2]), 0.0) # For the 2nd instance, only the 1st keypoint has visibility of 0 so only # the corresponding valid mask contains zeros. self.assertAlmostEqual(np.sum(valid_mask[:, 15:30, 10:20, 0]), 0.0) # All other values are 1.0 so the sum is: # 30 * 20 * 2 - 15 * 10 * 2 - 15 * 10 * 1 = 750. self.assertAlmostEqual(np.sum(valid_mask), 750.0) def test_assign_keypoints_offset_targets(self): def graph_fn(): gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2]) (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list) return indices, weights, offsets indices, weights, offsets = self.execute(graph_fn, []) # Only the last element has positive weight. np.testing.assert_array_almost_equal( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) # Validate the last element's indices and offsets. 
np.testing.assert_array_equal([0, 3, 2], indices[7, :]) np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :]) def test_assign_keypoint_depths_target(self): def graph_fn(): gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, 0.7, 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] depths = tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [0.5, 0.0, 7.0, 0.7, 0.4]]), dtype=tf.float32) gt_keypoint_depths_list = [depths] gt_keypoint_depth_weights = tf.constant( np.array([[1.0, 1.0, 1.0, 1.0, 1.0], [float('nan'), 0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.5, 1.0, 1.0]]), dtype=tf.float32) gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2], peak_radius=1) (indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_keypoint_depths_list=gt_keypoint_depths_list, gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list) return indices, depths, weights indices, depths, weights = self.execute(graph_fn, []) # Only the last 5 elements has positive weight. np.testing.assert_array_almost_equal([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5 ], weights) # Validate the last 5 elements' depth value. np.testing.assert_array_almost_equal( [7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0]) self.assertEqual((40, 3), indices.shape) np.testing.assert_array_equal([0, 2, 2], indices[35, :]) def test_assign_keypoint_depths_per_keypoints(self): def graph_fn(): gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, 0.7, 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] depths = tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [0.5, 0.0, 7.0, 0.7, 0.4]]), dtype=tf.float32) gt_keypoint_depths_list = [depths] gt_keypoint_depth_weights = tf.constant( np.array([[1.0, 1.0, 1.0, 1.0, 1.0], [float('nan'), 0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.5, 1.0, 1.0]]), dtype=tf.float32) gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2], peak_radius=1, per_keypoint_depth=True) (indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_keypoint_depths_list=gt_keypoint_depths_list, gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list) return indices, depths, weights indices, depths, weights = self.execute(graph_fn, []) # Only the last 5 elements has positive weight. 
np.testing.assert_array_almost_equal([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5 ], weights) # Validate the last 5 elements' depth value. np.testing.assert_array_almost_equal( [7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0]) self.assertEqual((40, 4), indices.shape) np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :]) def test_assign_keypoints_offset_targets_radius(self): def graph_fn(): gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2], peak_radius=1, per_keypoint_offset=True) (indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list) return indices, weights, offsets indices, weights, offsets = self.execute(graph_fn, []) # There are total 8 * 5 (neighbors) = 40 targets. self.assertAllEqual(indices.shape, [40, 4]) self.assertAllEqual(offsets.shape, [40, 2]) self.assertAllEqual(weights.shape, [40]) # Only the last 5 (radius 1 generates 5 valid points) element has positive # weight. np.testing.assert_array_almost_equal([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 ], weights) # Validate the last element's (with neighbors) indices and offsets. 
np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :]) np.testing.assert_array_equal([0, 3, 1, 1], indices[36, :]) np.testing.assert_array_equal([0, 3, 2, 1], indices[37, :]) np.testing.assert_array_equal([0, 3, 3, 1], indices[38, :]) np.testing.assert_array_equal([0, 4, 2, 1], indices[39, :]) np.testing.assert_array_almost_equal([1.6, 0.4], offsets[35, :]) np.testing.assert_array_almost_equal([0.6, 1.4], offsets[36, :]) np.testing.assert_array_almost_equal([0.6, 0.4], offsets[37, :]) np.testing.assert_array_almost_equal([0.6, -0.6], offsets[38, :]) np.testing.assert_array_almost_equal([-0.4, 0.4], offsets[39, :]) def test_assign_joint_regression_targets(self): def graph_fn(): gt_boxes_list = [ tf.constant( np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]), dtype=tf.float32) ] gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2]) (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list) return indices, offsets, weights indices, offsets, weights = self.execute(graph_fn, []) np.testing.assert_array_almost_equal( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights) np.testing.assert_array_equal([0, 15, 10, 1], indices[7, :]) np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[7, :]) def test_assign_joint_regression_targets_radius(self): def graph_fn(): gt_boxes_list = [ tf.constant( np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]), dtype=tf.float32) ] gt_classes_list = [ tf.one_hot([0, 1, 0, 1], depth=4), ] coordinates = tf.expand_dims( tf.constant( np.array([[0.1, 0.2, 0.3, 0.4, 0.5], [float('nan'), 0.7, float('nan'), 0.9, 0.4], [0.4, 0.1, 0.4, 0.2, 0.0], [float('nan'), 0.0, 0.12, 0.7, 0.4]]), dtype=tf.float32), axis=2) gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)] cn_assigner = targetassigner.CenterNetKeypointTargetAssigner( stride=4, class_id=1, keypoint_indices=[0, 2], peak_radius=1) (indices, offsets, weights) = cn_assigner.assign_joint_regression_targets( height=120, width=80, gt_keypoints_list=gt_keypoints_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list) return indices, offsets, weights indices, offsets, weights = self.execute(graph_fn, []) # There are total 8 * 5 (neighbors) = 40 targets. self.assertAllEqual(indices.shape, [40, 4]) self.assertAllEqual(offsets.shape, [40, 2]) self.assertAllEqual(weights.shape, [40]) # Only the last 5 (radius 1 generates 5 valid points) element has positive # weight. np.testing.assert_array_almost_equal([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 ], weights) # Test the values of the indices and offsets of the last 5 elements. 
np.testing.assert_array_equal([0, 14, 10, 1], indices[35, :]) np.testing.assert_array_equal([0, 15, 9, 1], indices[36, :]) np.testing.assert_array_equal([0, 15, 10, 1], indices[37, :]) np.testing.assert_array_equal([0, 15, 11, 1], indices[38, :]) np.testing.assert_array_equal([0, 16, 10, 1], indices[39, :]) np.testing.assert_array_almost_equal([-10.4, -7.6], offsets[35, :]) np.testing.assert_array_almost_equal([-11.4, -6.6], offsets[36, :]) np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[37, :]) np.testing.assert_array_almost_equal([-11.4, -8.6], offsets[38, :]) np.testing.assert_array_almost_equal([-12.4, -7.6], offsets[39, :]) class CenterNetMaskTargetAssignerTest(test_case.TestCase): def test_assign_segmentation_targets(self): def graph_fn(): gt_masks_list = [ # Example 0. tf.constant([ [ [1., 0., 0., 0.], [1., 1., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], ], [ [0., 0., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.], [0., 0., 0., 0.], ], [ [1., 1., 0., 0.], [1., 1., 0., 0.], [0., 0., 1., 1.], [0., 0., 1., 1.], ] ], dtype=tf.float32), # Example 1. tf.constant([ [ [1., 1., 0., 1.], [1., 1., 1., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.], ], [ [0., 0., 0., 0.], [0., 0., 0., 0.], [1., 1., 0., 0.], [1., 1., 0., 0.], ], ], dtype=tf.float32), ] gt_classes_list = [ # Example 0. tf.constant([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.]], dtype=tf.float32), # Example 1. tf.constant([[0., 1., 0.], [0., 1., 0.]], dtype=tf.float32) ] gt_boxes_list = [ # Example 0. tf.constant([[0.0, 0.0, 0.5, 0.5], [0.0, 0.5, 0.5, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=tf.float32), # Example 1. tf.constant([[0.0, 0.0, 1.0, 1.0], [0.5, 0.0, 1.0, 0.5]], dtype=tf.float32) ] gt_mask_weights_list = [ # Example 0. tf.constant([0.0, 1.0, 1.0], dtype=tf.float32), # Example 1. tf.constant([1.0, 1.0], dtype=tf.float32) ] cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=2) segmentation_target, segmentation_weight = ( cn_assigner.assign_segmentation_targets( gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list, gt_boxes_list=gt_boxes_list, gt_mask_weights_list=gt_mask_weights_list, mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR)) return segmentation_target, segmentation_weight segmentation_target, segmentation_weight = self.execute(graph_fn, []) expected_seg_target = np.array([ # Example 0 [[class 0, class 1], [background, class 0]] [[[1, 0, 0], [0, 1, 0]], [[0, 0, 0], [1, 0, 0]]], # Example 1 [[class 1, class 1], [class 1, class 1]] [[[0, 1, 0], [0, 1, 0]], [[0, 1, 0], [0, 1, 0]]], ], dtype=np.float32) np.testing.assert_array_almost_equal( expected_seg_target, segmentation_target) expected_seg_weight = np.array([ [[0, 1], [1, 1]], [[1, 1], [1, 1]]], dtype=np.float32) np.testing.assert_array_almost_equal( expected_seg_weight, segmentation_weight) def test_assign_segmentation_targets_no_objects(self): def graph_fn(): gt_masks_list = [tf.zeros((0, 5, 5))] gt_classes_list = [tf.zeros((0, 10))] cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=1) segmentation_target, _ = cn_assigner.assign_segmentation_targets( gt_masks_list=gt_masks_list, gt_classes_list=gt_classes_list, mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR) return segmentation_target segmentation_target = self.execute(graph_fn, []) expected_seg_target = np.zeros((1, 5, 5, 10)) np.testing.assert_array_almost_equal( expected_seg_target, segmentation_target) class CenterNetDensePoseTargetAssignerTest(test_case.TestCase): def test_assign_part_and_coordinate_targets(self): def graph_fn(): 
gt_dp_num_points_list = [ # Example 0. tf.constant([2, 0, 3], dtype=tf.int32), # Example 1. tf.constant([1, 1], dtype=tf.int32), ] gt_dp_part_ids_list = [ # Example 0. tf.constant([[1, 6, 0], [0, 0, 0], [0, 2, 3]], dtype=tf.int32), # Example 1. tf.constant([[7, 0, 0], [0, 0, 0]], dtype=tf.int32), ] gt_dp_surface_coords_list = [ # Example 0. tf.constant( [[[0.11, 0.2, 0.3, 0.4], # Box 0. [0.6, 0.4, 0.1, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], # Box 1. [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.22, 0.1, 0.6, 0.8], # Box 2. [0.0, 0.4, 0.5, 1.0], [0.3, 0.2, 0.4, 0.1]]], dtype=tf.float32), # Example 1. tf.constant( [[[0.5, 0.5, 0.3, 1.0], # Box 0. [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.2, 0.2, 0.5, 0.8], # Box 1. [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], dtype=tf.float32), ] gt_weights_list = [ # Example 0. tf.constant([1.0, 1.0, 0.5], dtype=tf.float32), # Example 1. tf.constant([0.0, 1.0], dtype=tf.float32), ] cn_assigner = targetassigner.CenterNetDensePoseTargetAssigner(stride=4) batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( cn_assigner.assign_part_and_coordinate_targets( height=120, width=80, gt_dp_num_points_list=gt_dp_num_points_list, gt_dp_part_ids_list=gt_dp_part_ids_list, gt_dp_surface_coords_list=gt_dp_surface_coords_list, gt_weights_list=gt_weights_list)) return batch_indices, batch_part_ids, batch_surface_coords, batch_weights batch_indices, batch_part_ids, batch_surface_coords, batch_weights = ( self.execute(graph_fn, [])) expected_batch_indices = np.array([ # Example 0. e.g. # The first set of indices is calculated as follows: # floor(0.11*120/4) = 3, floor(0.2*80/4) = 4. [0, 3, 4, 1], [0, 18, 8, 6], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 6, 2, 0], [0, 0, 8, 2], [0, 9, 4, 3], # Example 1. [1, 15, 10, 7], [1, 0, 0, 0], [1, 0, 0, 0], [1, 6, 4, 0], [1, 0, 0, 0], [1, 0, 0, 0] ], dtype=np.int32) expected_batch_part_ids = tf.one_hot( [1, 6, 0, 0, 0, 0, 0, 2, 3, 7, 0, 0, 0, 0, 0], depth=24).numpy() expected_batch_surface_coords = np.array([ # Box 0. [0.3, 0.4], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.6, 0.8], [0.5, 1.0], [0.4, 0.1], # Box 1. [0.3, 1.0], [0.0, 0.0], [0.0, 0.0], [0.5, 0.8], [0.0, 0.0], [0.0, 0.0], ], np.float32) expected_batch_weights = np.array([ # Box 0. 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, # Box 1. 
0.0, 0.0, 0.0, 1.0, 0.0, 0.0 ], dtype=np.float32) self.assertAllEqual(expected_batch_indices, batch_indices) self.assertAllEqual(expected_batch_part_ids, batch_part_ids) self.assertAllClose(expected_batch_surface_coords, batch_surface_coords) self.assertAllClose(expected_batch_weights, batch_weights) class CenterNetTrackTargetAssignerTest(test_case.TestCase): def setUp(self): super(CenterNetTrackTargetAssignerTest, self).setUp() self._box_center = [0.0, 0.0, 1.0, 1.0] self._box_center_small = [0.25, 0.25, 0.75, 0.75] self._box_lower_left = [0.5, 0.0, 1.0, 0.5] self._box_center_offset = [0.1, 0.05, 1.0, 1.0] self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] def test_assign_track_targets(self): """Test the assign_track_targets function.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_lower_left, self._box_center_small]), tf.constant([self._box_center_small, self._box_odd_coordinates]), ] track_id_batch = [ tf.constant([0, 1]), tf.constant([1, 0]), tf.constant([0, 2]), ] assigner = targetassigner.CenterNetTrackTargetAssigner( stride=4, num_track_ids=3) (batch_indices, batch_weights, track_targets) = assigner.assign_track_targets( height=80, width=80, gt_track_ids_list=track_id_batch, gt_boxes_list=box_batch) return batch_indices, batch_weights, track_targets indices, weights, track_ids = self.execute(graph_fn, []) self.assertEqual(indices.shape, (3, 2, 3)) self.assertEqual(track_ids.shape, (3, 2, 3)) self.assertEqual(weights.shape, (3, 2)) np.testing.assert_array_equal(indices, [[[0, 10, 10], [0, 15, 5]], [[1, 15, 5], [1, 10, 10]], [[2, 10, 10], [2, 7, 11]]]) np.testing.assert_array_equal(track_ids, [[[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [1, 0, 0]], [[1, 0, 0], [0, 0, 1]]]) np.testing.assert_array_equal(weights, [[1, 1], [1, 1], [1, 1]]) def test_assign_track_targets_weights(self): """Test the assign_track_targets function with box weights.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_lower_left, self._box_center_small]), tf.constant([self._box_center_small, self._box_odd_coordinates]), ] track_id_batch = [ tf.constant([0, 1]), tf.constant([1, 0]), tf.constant([0, 2]), ] weights_batch = [ tf.constant([0.0, 1.0]), tf.constant([1.0, 1.0]), tf.constant([0.0, 0.0]) ] assigner = targetassigner.CenterNetTrackTargetAssigner( stride=4, num_track_ids=3) (batch_indices, batch_weights, track_targets) = assigner.assign_track_targets( height=80, width=80, gt_track_ids_list=track_id_batch, gt_boxes_list=box_batch, gt_weights_list=weights_batch) return batch_indices, batch_weights, track_targets indices, weights, track_ids = self.execute(graph_fn, []) self.assertEqual(indices.shape, (3, 2, 3)) self.assertEqual(track_ids.shape, (3, 2, 3)) self.assertEqual(weights.shape, (3, 2)) np.testing.assert_array_equal(indices, [[[0, 10, 10], [0, 15, 5]], [[1, 15, 5], [1, 10, 10]], [[2, 10, 10], [2, 7, 11]]]) np.testing.assert_array_equal(track_ids, [[[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [1, 0, 0]], [[1, 0, 0], [0, 0, 1]]]) np.testing.assert_array_equal(weights, [[0, 1], [1, 1], [0, 0]]) # TODO(xwwang): Add a test for the case when no objects are detected. 
class CornerOffsetTargetAssignerTest(test_case.TestCase): def test_filter_overlap_min_area_empty(self): """Test that empty masks work on CPU.""" def graph_fn(masks): return targetassigner.filter_mask_overlap_min_area(masks) masks = self.execute_cpu(graph_fn, [np.zeros((0, 5, 5), dtype=np.float32)]) self.assertEqual(masks.shape, (0, 5, 5)) def test_filter_overlap_min_area(self): """Test the object with min. area is selected instead of overlap.""" def graph_fn(masks): return targetassigner.filter_mask_overlap_min_area(masks) masks = np.zeros((3, 4, 4), dtype=np.float32) masks[0, :2, :2] = 1.0 masks[1, :3, :3] = 1.0 masks[2, 3, 3] = 1.0 masks = self.execute(graph_fn, [masks]) self.assertAllClose(masks[0], [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) self.assertAllClose(masks[1], [[0, 0, 1, 0], [0, 0, 1, 0], [1, 1, 1, 0], [0, 0, 0, 0]]) self.assertAllClose(masks[2], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) def test_assign_corner_offset_single_object(self): """Test that corner offsets are correct with a single object.""" assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) def graph_fn(): boxes = [ tf.constant([[0., 0., 1., 1.]]) ] mask = np.zeros((1, 4, 4), dtype=np.float32) mask[0, 1:3, 1:3] = 1.0 masks = [tf.constant(mask)] return assigner.assign_corner_offset_targets(boxes, masks) corner_offsets, foreground = self.execute(graph_fn, []) self.assertAllClose(foreground[0], [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) self.assertAllClose(corner_offsets[0, :, :, 0], [[0, 0, 0, 0], [0, -1, -1, 0], [0, -2, -2, 0], [0, 0, 0, 0]]) self.assertAllClose(corner_offsets[0, :, :, 1], [[0, 0, 0, 0], [0, -1, -2, 0], [0, -1, -2, 0], [0, 0, 0, 0]]) self.assertAllClose(corner_offsets[0, :, :, 2], [[0, 0, 0, 0], [0, 3, 3, 0], [0, 2, 2, 0], [0, 0, 0, 0]]) self.assertAllClose(corner_offsets[0, :, :, 3], [[0, 0, 0, 0], [0, 3, 2, 0], [0, 3, 2, 0], [0, 0, 0, 0]]) def test_assign_corner_offset_multiple_objects(self): """Test corner offsets are correct with multiple objects.""" assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) def graph_fn(): boxes = [ tf.constant([[0., 0., 1., 1.], [0., 0., 0., 0.]]), tf.constant([[0., 0., .25, .25], [.25, .25, 1., 1.]]) ] mask1 = np.zeros((2, 4, 4), dtype=np.float32) mask1[0, 0, 0] = 1.0 mask1[0, 3, 3] = 1.0 mask2 = np.zeros((2, 4, 4), dtype=np.float32) mask2[0, :2, :2] = 1.0 mask2[1, 1:, 1:] = 1.0 masks = [tf.constant(mask1), tf.constant(mask2)] return assigner.assign_corner_offset_targets(boxes, masks) corner_offsets, foreground = self.execute(graph_fn, []) self.assertEqual(corner_offsets.shape, (2, 4, 4, 4)) self.assertEqual(foreground.shape, (2, 4, 4)) self.assertAllClose(foreground[0], [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) self.assertAllClose(corner_offsets[0, :, :, 0], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -3]]) self.assertAllClose(corner_offsets[0, :, :, 1], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, -3]]) self.assertAllClose(corner_offsets[0, :, :, 2], [[4, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) self.assertAllClose(corner_offsets[0, :, :, 3], [[4, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) self.assertAllClose(foreground[1], [[1, 1, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]) self.assertAllClose(corner_offsets[1, :, :, 0], [[0, 0, 0, 0], [-1, -1, 0, 0], [0, -1, -1, -1], [0, -2, -2, -2]]) self.assertAllClose(corner_offsets[1, :, :, 1], [[0, -1, 0, 0], [0, -1, -1, -2], [0, 0, -1, -2], [0, 0, -1, -2]]) 
self.assertAllClose(corner_offsets[1, :, :, 2], [[1, 1, 0, 0], [0, 0, 3, 3], [0, 2, 2, 2], [0, 1, 1, 1]]) self.assertAllClose(corner_offsets[1, :, :, 3], [[1, 0, 0, 0], [1, 0, 2, 1], [0, 3, 2, 1], [0, 3, 2, 1]]) def test_assign_corner_offsets_no_objects(self): """Test assignment works with empty input on cpu.""" assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1) def graph_fn(): boxes = [ tf.zeros((0, 4), dtype=tf.float32) ] masks = [tf.zeros((0, 5, 5), dtype=tf.float32)] return assigner.assign_corner_offset_targets(boxes, masks) corner_offsets, foreground = self.execute_cpu(graph_fn, []) self.assertAllClose(corner_offsets, np.zeros((1, 5, 5, 4))) self.assertAllClose(foreground, np.zeros((1, 5, 5))) class CenterNetTemporalOffsetTargetAssigner(test_case.TestCase): def setUp(self): super(CenterNetTemporalOffsetTargetAssigner, self).setUp() self._box_center = [0.0, 0.0, 1.0, 1.0] self._box_center_small = [0.25, 0.25, 0.75, 0.75] self._box_lower_left = [0.5, 0.0, 1.0, 0.5] self._box_center_offset = [0.1, 0.05, 1.0, 1.0] self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625] self._offset_center = [0.5, 0.4] self._offset_center_small = [0.1, 0.1] self._offset_lower_left = [-0.1, 0.1] self._offset_center_offset = [0.4, 0.3] self._offset_odd_coord = [0.125, -0.125] def test_assign_empty_groundtruths(self): """Tests the assign_offset_targets function with empty inputs.""" def graph_fn(): box_batch = [ tf.zeros((0, 4), dtype=tf.float32), ] offset_batch = [ tf.zeros((0, 2), dtype=tf.float32), ] match_flag_batch = [ tf.zeros((0), dtype=tf.float32), ] assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) indices, temporal_offset, weights = assigner.assign_temporal_offset_targets( 80, 80, box_batch, offset_batch, match_flag_batch) return indices, temporal_offset, weights indices, temporal_offset, weights = self.execute(graph_fn, []) self.assertEqual(indices.shape, (0, 3)) self.assertEqual(temporal_offset.shape, (0, 2)) self.assertEqual(weights.shape, (0,)) def test_assign_offset_targets(self): """Tests the assign_offset_targets function.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center_offset]), tf.constant([self._box_center_small, self._box_odd_coordinates]), ] offset_batch = [ tf.constant([self._offset_center, self._offset_lower_left]), tf.constant([self._offset_center_offset]), tf.constant([self._offset_center_small, self._offset_odd_coord]), ] match_flag_batch = [ tf.constant([1.0, 1.0]), tf.constant([1.0]), tf.constant([1.0, 1.0]), ] assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) indices, temporal_offset, weights = assigner.assign_temporal_offset_targets( 80, 80, box_batch, offset_batch, match_flag_batch) return indices, temporal_offset, weights indices, temporal_offset, weights = self.execute(graph_fn, []) self.assertEqual(indices.shape, (5, 3)) self.assertEqual(temporal_offset.shape, (5, 2)) self.assertEqual(weights.shape, (5,)) np.testing.assert_array_equal( indices, [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) np.testing.assert_array_almost_equal( temporal_offset, [[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]]) np.testing.assert_array_equal(weights, 1) def test_assign_offset_targets_with_match_flags(self): """Tests the assign_offset_targets function with match flags.""" def graph_fn(): box_batch = [ tf.constant([self._box_center, self._box_lower_left]), tf.constant([self._box_center_offset]), tf.constant([self._box_center_small, 
self._box_odd_coordinates]), ] offset_batch = [ tf.constant([self._offset_center, self._offset_lower_left]), tf.constant([self._offset_center_offset]), tf.constant([self._offset_center_small, self._offset_odd_coord]), ] match_flag_batch = [ tf.constant([0.0, 1.0]), tf.constant([1.0]), tf.constant([1.0, 1.0]), ] cn_assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4) weights_batch = [ tf.constant([1.0, 0.0]), tf.constant([1.0]), tf.constant([1.0, 1.0]) ] indices, temporal_offset, weights = cn_assigner.assign_temporal_offset_targets( 80, 80, box_batch, offset_batch, match_flag_batch, weights_batch) return indices, temporal_offset, weights indices, temporal_offset, weights = self.execute(graph_fn, []) self.assertEqual(indices.shape, (5, 3)) self.assertEqual(temporal_offset.shape, (5, 2)) self.assertEqual(weights.shape, (5,)) np.testing.assert_array_equal( indices, [[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]]) np.testing.assert_array_almost_equal( temporal_offset, [[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]]) np.testing.assert_array_equal(weights, [0, 0, 1, 1, 1]) class DETRTargetAssignerTest(test_case.TestCase): def test_assign_detr(self): def graph_fn(pred_corners, groundtruth_box_corners, groundtruth_labels, predicted_labels): detr_target_assigner = targetassigner.DETRTargetAssigner() pred_boxlist = box_list.BoxList(pred_corners) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = detr_target_assigner.assign( pred_boxlist, groundtruth_boxlist, predicted_labels, groundtruth_labels) (cls_targets, cls_weights, reg_targets, reg_weights) = result return (cls_targets, cls_weights, reg_targets, reg_weights) pred_corners = np.array([[0.25, 0.25, 0.4, 0.2], [0.5, 0.8, 1.0, 0.8], [0.9, 0.5, 0.1, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]], dtype=np.float32) groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]], dtype=np.float32) exp_cls_targets = [[0, 1], [0, 1], [1, 0]] exp_cls_weights = [[1, 1], [1, 1], [1, 1]] exp_reg_targets = [[0.25, 0.25, 0.5, 0.5], [0.7, 0.7, 0.4, 0.4], [0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu( graph_fn, [pred_corners, groundtruth_box_corners, groundtruth_labels, predicted_labels]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) def test_batch_assign_detr(self): def graph_fn(pred_corners, groundtruth_box_corners, groundtruth_labels, predicted_labels): detr_target_assigner = targetassigner.DETRTargetAssigner() result = detr_target_assigner.batch_assign( pred_corners, groundtruth_box_corners, [predicted_labels], [groundtruth_labels]) (cls_targets, cls_weights, reg_targets, reg_weights) = result return (cls_targets, cls_weights, reg_targets, reg_weights) pred_corners = np.array([[[0.25, 0.25, 0.4, 0.2], [0.5, 0.8, 1.0, 0.8], [0.9, 0.5, 0.1, 1.0]]], dtype=np.float32) groundtruth_box_corners = np.array([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]]], dtype=np.float32) predicted_labels = np.array([[-3.0, 
3.0], [2.0, 9.4], [5.0, 1.0]], dtype=np.float32) groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]], dtype=np.float32) exp_cls_targets = [[[0, 1], [0, 1], [1, 0]]] exp_cls_weights = [[[1, 1], [1, 1], [1, 1]]] exp_reg_targets = [[[0.25, 0.25, 0.5, 0.5], [0.7, 0.7, 0.4, 0.4], [0, 0, 0, 0]]] exp_reg_weights = [[1, 1, 0]] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu( graph_fn, [pred_corners, groundtruth_box_corners, groundtruth_labels, predicted_labels]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEqual(cls_targets_out.dtype, np.float32) self.assertEqual(cls_weights_out.dtype, np.float32) self.assertEqual(reg_targets_out.dtype, np.float32) self.assertEqual(reg_weights_out.dtype, np.float32) if __name__ == '__main__': tf.enable_v2_behavior() tf.test.main()
120,378
42.038613
85
py
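The expected values asserted in the CenterNet keypoint tests above all come from the same stride bookkeeping: a normalized keypoint is scaled into feature-map coordinates, the integer part picks the target cell (the radius variants additionally emit targets for the neighbouring cells), and the fractional remainder becomes the offset target. Below is a small illustrative sketch of that arithmetic, using the 120x80 image, stride 4, and the one valid keypoint at normalized (y, x) = (0.12, 0.12) from the tests; the helper name is made up and this is not the library implementation.

import numpy as np

def keypoint_cell_and_offset(y_norm, x_norm, height, width, stride):
  """Maps a normalized keypoint to its feature-map cell and sub-cell offset."""
  # Scale normalized coordinates into output (feature-map) coordinates.
  y = y_norm * height / stride
  x = x_norm * width / stride
  # The integer part picks the cell that receives the target.
  y_cell, x_cell = int(np.floor(y)), int(np.floor(x))
  # The fractional remainder inside the cell is the offset target.
  return (y_cell, x_cell), (y - y_cell, x - x_cell)

cell, offset = keypoint_cell_and_offset(0.12, 0.12, height=120, width=80, stride=4)
print(cell)    # (3, 2)      -> matches the asserted [.., 3, 2, ..] index rows
print(offset)  # ~(0.6, 0.4) -> matches the asserted offset target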
models
models-master/research/object_detection/core/freezable_batch_norm.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A freezable batch norm layer that uses Keras batch normalization.""" import tensorflow.compat.v1 as tf class FreezableBatchNorm(tf.keras.layers.BatchNormalization): """Batch normalization layer (Ioffe and Szegedy, 2014). This is a `freezable` batch norm layer that supports setting the `training` parameter in the __init__ method rather than having to set it either via the Keras learning phase or via the `call` method parameter. This layer will forward all other parameters to the default Keras `BatchNormalization` layer. This class is necessary because Object Detection model training sometimes requires batch normalization layers to be `frozen` and used as if it was evaluation time, despite still training (and potentially using dropout layers). Like the default Keras BatchNormalization layer, this will normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Args: training: If False, the layer will normalize using the moving average and std. dev, without updating the learned avg and std. dev. If None or True, the layer will follow the keras BatchNormalization layer strategy of checking the Keras learning phase at `call` time to decide what to do. **kwargs: The keyword arguments to forward to the keras BatchNormalization layer constructor. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167) """ def __init__(self, training=None, **kwargs): super(FreezableBatchNorm, self).__init__(**kwargs) self._training = training def call(self, inputs, training=None): # Override the call arg only if the batchnorm is frozen. (Ignore None) if self._training is False: # pylint: disable=g-bool-id-comparison training = self._training return super(FreezableBatchNorm, self).call(inputs, training=training)
2,987
42.304348
80
py
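A minimal usage sketch of the class above, assuming a TF2/eager runtime: the frozen instance keeps using (and never updates) its moving statistics even when called with training=True, while a default instance behaves like a regular Keras BatchNormalization layer. Illustrative only; the input tensor and values are made up.

import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm

tf.enable_v2_behavior()

# Frozen: always normalizes with the moving statistics, even while the
# surrounding model is training.
frozen_bn = freezable_batch_norm.FreezableBatchNorm(training=False)
# Default: defers to the `training` argument / Keras learning phase.
regular_bn = freezable_batch_norm.FreezableBatchNorm()

x = tf.constant(np.random.normal(3.0, 2.0, size=(8, 4)).astype(np.float32))
y_frozen = frozen_bn(x, training=True)    # `training` is overridden to False here.
y_regular = regular_bn(x, training=True)  # Uses (and updates) batch statistics.

# The frozen layer's moving mean stays at its initial zeros; the regular
# layer's has taken a momentum-weighted step toward the batch mean.
print(frozen_bn.moving_mean.numpy())
print(regular_bn.moving_mean.numpy())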
models
models-master/research/object_detection/core/box_list_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.box_list.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.utils import test_case class BoxListTest(test_case.TestCase): """Tests for BoxList class.""" def test_num_boxes(self): def graph_fn(): data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) boxes = box_list.BoxList(data) return boxes.num_boxes() num_boxes_out = self.execute(graph_fn, []) self.assertEqual(num_boxes_out, 3) def test_get_correct_center_coordinates_and_sizes(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) def graph_fn(boxes): boxes = box_list.BoxList(boxes) centers_sizes = boxes.get_center_coordinates_and_sizes() return centers_sizes centers_sizes_out = self.execute(graph_fn, [boxes]) expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]] self.assertAllClose(centers_sizes_out, expected_centers_sizes) def test_create_box_list_with_dynamic_shape(self): def graph_fn(): data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32) indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1]) data = tf.gather(data, indices) assert data.get_shape().as_list() == [None, 4] boxes = box_list.BoxList(data) return boxes.num_boxes() num_boxes = self.execute(graph_fn, []) self.assertEqual(num_boxes, 2) def test_transpose_coordinates(self): boxes = np.array([[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]], np.float32) def graph_fn(boxes): boxes = box_list.BoxList(boxes) boxes.transpose_coordinates() return boxes.get() transpoded_boxes = self.execute(graph_fn, [boxes]) expected_corners = [[10.0, 10.0, 15.0, 20.0], [0.1, 0.2, 0.4, 0.5]] self.assertAllClose(transpoded_boxes, expected_corners) def test_box_list_invalid_inputs(self): data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32) data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32) data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32) with self.assertRaises(ValueError): _ = box_list.BoxList(data0) with self.assertRaises(ValueError): _ = box_list.BoxList(data1) with self.assertRaises(ValueError): _ = box_list.BoxList(data2) def test_num_boxes_static(self): box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]] boxes = box_list.BoxList(tf.constant(box_corners)) self.assertEqual(boxes.num_boxes_static(), 2) self.assertEqual(type(boxes.num_boxes_static()), int) def test_as_tensor_dict(self): boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32) boxlist = box_list.BoxList(boxes) classes = tf.constant([0, 1]) boxlist.add_field('classes', classes) scores = tf.constant([0.75, 0.2]) boxlist.add_field('scores', scores) tensor_dict = boxlist.as_tensor_dict() self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes, 'boxes': boxes}) def 
test_as_tensor_dict_with_features(self): boxes = tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32) boxlist = box_list.BoxList(boxes) classes = tf.constant([0, 1]) boxlist.add_field('classes', classes) scores = tf.constant([0.75, 0.2]) boxlist.add_field('scores', scores) tensor_dict = boxlist.as_tensor_dict(['scores', 'classes']) self.assertDictEqual(tensor_dict, {'scores': scores, 'classes': classes}) def test_as_tensor_dict_missing_field(self): boxlist = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) boxlist.add_field('classes', tf.constant([0, 1])) boxlist.add_field('scores', tf.constant([0.75, 0.2])) with self.assertRaises(ValueError): boxlist.as_tensor_dict(['foo', 'bar']) if __name__ == '__main__': tf.test.main()
4,812
38.45082
80
py
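The tests above exercise the core BoxList workflow: wrap an [N, 4] float32 tensor of [ymin, xmin, ymax, xmax] corners, attach extra per-box fields, and read them back. A short illustrative sketch of that workflow, assuming a TF2/eager runtime (the box values are made up).

import tensorflow.compat.v1 as tf
from object_detection.core import box_list

tf.enable_v2_behavior()

# Boxes are rows of [ymin, xmin, ymax, xmax] in a float32 [N, 4] tensor.
corners = tf.constant([[0.1, 0.1, 0.4, 0.4],
                       [0.1, 0.1, 0.5, 0.5]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('classes', tf.constant([0, 1]))
boxes.add_field('scores', tf.constant([0.75, 0.2]))

print(boxes.num_boxes_static())           # 2
print(boxes.get_field('scores').numpy())  # [0.75 0.2]
# Export only the requested fields; unknown field names raise ValueError.
tensor_dict = boxes.as_tensor_dict(['scores', 'classes'])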
models
models-master/research/object_detection/core/model.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Abstract detection model. This file defines a generic base class for detection models. Programs that are designed to work with arbitrary detection models should only depend on this class. We intend for the functions in this class to follow tensor-in/tensor-out design, thus all functions have tensors or lists/dictionaries holding tensors as inputs and outputs. Abstractly, detection models predict output tensors given input images which can be passed to a loss function at training time or passed to a postprocessing function at eval time. The computation graphs at a high level consequently look as follows: Training time: inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) Evaluation time: inputs (images tensor) -> preprocess -> predict -> postprocess -> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) DetectionModels must thus implement four functions (1) preprocess, (2) predict, (3) postprocess and (4) loss. DetectionModels should make no assumptions about the input size or aspect ratio --- they are responsible for doing any resize/reshaping necessary (see docstring for the preprocess function). Output classes are always integers in the range [0, num_classes). Any mapping of these integers to semantic labels is to be handled outside of this class. Images are resized in the `preprocess` method. All of `preprocess`, `predict`, and `postprocess` should be reentrant. The `preprocess` method runs `image_resizer_fn` that returns resized_images and `true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros, true_image_shapes indicate the slices that contain the image without padding. This is useful for padding images to be a fixed size for batching. The `postprocess` method uses the true image shapes to clip predictions that lie outside of images. By default, DetectionModels produce bounding box detections; However, we support a handful of auxiliary annotations associated with each bounding box, namely, instance masks and keypoints. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow.compat.v1 as tf from object_detection.core import standard_fields as fields # If using a new enough version of TensorFlow, detection models should be a # tf module or keras model for tracking. try: _BaseClass = tf.keras.layers.Layer except AttributeError: _BaseClass = object class DetectionModel(six.with_metaclass(abc.ABCMeta, _BaseClass)): """Abstract base class for detection models. Extends tf.Module to guarantee variable tracking. """ def __init__(self, num_classes): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include background categories that might be implicitly predicted in various implementations. 
""" self._num_classes = num_classes self._groundtruth_lists = {} self._training_step = None super(DetectionModel, self).__init__() @property def num_classes(self): return self._num_classes def groundtruth_lists(self, field): """Access list of groundtruth tensors. Args: field: a string key, options are fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints, keypoint_visibilities, densepose_*, track_ids, temporal_offsets, track_match_flags} fields.InputDataFields.is_annotated. Returns: a list of tensors holding groundtruth information (see also provide_groundtruth function below), with one entry for each image in the batch. Raises: RuntimeError: if the field has not been provided via provide_groundtruth. """ if field not in self._groundtruth_lists: raise RuntimeError('Groundtruth tensor {} has not been provided'.format( field)) return self._groundtruth_lists[field] def groundtruth_has_field(self, field): """Determines whether the groundtruth includes the given field. Args: field: a string key, options are fields.BoxListFields.{boxes,classes,masks,mask_weights,keypoints, keypoint_visibilities, densepose_*, track_ids} or fields.InputDataFields.is_annotated. Returns: True if the groundtruth includes the given field, False otherwise. """ return field in self._groundtruth_lists @property def training_step(self): if self._training_step is None: raise ValueError('Training step was not provided to the model.') return self._training_step @staticmethod def get_side_inputs(features): """Get side inputs from input features. This placeholder method provides a way for a meta-architecture to specify how to grab additional side inputs from input features (in addition to the image itself) and allows models to depend on contextual information. By default, detection models do not use side information (and thus this method returns an empty dictionary by default. However it can be overridden if side inputs are necessary." Args: features: A dictionary of tensors. Returns: An empty dictionary by default. """ return {} @abc.abstractmethod def preprocess(self, inputs): """Input preprocessing. To be overridden by implementations. This function is responsible for any scaling/shifting of input values that is necessary prior to running the detector on an input image. It is also responsible for any resizing, padding that might be necessary as images are assumed to arrive in arbitrary sizes. While this function could conceivably be part of the predict method (below), it is often convenient to keep these separate --- for example, we may want to preprocess on one device, place onto a queue, and let another device (e.g., the GPU) handle prediction. A few important notes about the preprocess function: + We assume that this operation does not have any trainable variables nor does it affect the groundtruth annotations in any way (thus data augmentation operations such as random cropping should be performed externally). + There is no assumption that the batchsize in this function is the same as the batch size in the predict function. In fact, we recommend calling the preprocess function prior to calling any batching operations (which should happen outside of the model) and thus assuming that batch sizes are equal to 1 in the preprocess function. + There is also no explicit assumption that the output resolutions must be fixed across inputs --- this is to support "fully convolutional" settings in which input images can have different shapes/resolutions. 
Args: inputs: a [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float32 tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ pass @abc.abstractmethod def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): """Predict prediction tensors from inputs tensor. Outputs of this function can be passed to loss or postprocess functions. Args: preprocessed_inputs: a [batch, height, width, channels] float32 tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **side_inputs: additional tensors that are required by the network. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. """ pass @abc.abstractmethod def postprocess(self, prediction_dict, true_image_shapes, **params): """Convert predicted output tensors to final detections. This stage typically performs a few things such as * Non-Max Suppression to remove overlapping detection boxes. * Score conversion and background class removal. Outputs adhere to the following conventions: * Classes are integers in [0, num_classes); background classes are removed and the first non-background class is mapped to 0. If the model produces class-agnostic detections, then no output is produced for classes. * Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max] format and normalized relative to the image window. * `num_detections` is provided for settings where detections are padded to a fixed number of boxes. * We do not specifically assume any kind of probabilistic interpretation of the scores --- the only important thing is their relative ordering. Thus implementations of the postprocess function are free to output logits, probabilities, calibrated probabilities, or anything else. Args: prediction_dict: a dictionary holding prediction tensors. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Additional keyword arguments for specific implementations of DetectionModel. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] detection_scores: [batch, max_detections] detection_classes: [batch, max_detections] (If a model is producing class-agnostic detections, this field may be missing) detection_masks: [batch, max_detections, mask_height, mask_width] (optional) detection_keypoints: [batch, max_detections, num_keypoints, 2] (optional) detection_keypoint_scores: [batch, max_detections, num_keypoints] (optional) detection_surface_coords: [batch, max_detections, mask_height, mask_width, 2] (optional) num_detections: [batch] In addition to the above fields this stage also outputs the following raw tensors: raw_detection_boxes: [batch, total_detections, 4] tensor containing all detection boxes from `prediction_dict` in the format [ymin, xmin, ymax, xmax] and normalized co-ordinates. 
raw_detection_scores: [batch, total_detections, num_classes_with_background] tensor of class score logits for raw detection boxes. """ pass @abc.abstractmethod def loss(self, prediction_dict, true_image_shapes): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding predicted tensors true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: a dictionary mapping strings (loss names) to scalar tensors representing loss values. """ pass def provide_groundtruth( self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list=None, groundtruth_mask_weights_list=None, groundtruth_keypoints_list=None, groundtruth_keypoint_visibilities_list=None, groundtruth_dp_num_points_list=None, groundtruth_dp_part_ids_list=None, groundtruth_dp_surface_coords_list=None, groundtruth_track_ids_list=None, groundtruth_temporal_offsets_list=None, groundtruth_track_match_flags_list=None, groundtruth_weights_list=None, groundtruth_confidences_list=None, groundtruth_is_crowd_list=None, groundtruth_group_of_list=None, groundtruth_area_list=None, is_annotated_list=None, groundtruth_labeled_classes=None, groundtruth_verified_neg_classes=None, groundtruth_not_exhaustive_classes=None, groundtruth_keypoint_depths_list=None, groundtruth_keypoint_depth_weights_list=None, groundtruth_image_classes=None, training_step=None): """Provide groundtruth tensors. Args: groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape [num_boxes, 4] containing coordinates of the groundtruth boxes. Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and assumed to be normalized and clipped relative to the image window with y_min <= y_max and x_min <= x_max. groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot) tensors of shape [num_boxes, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. groundtruth_masks_list: a list of 3-D tf.float32 tensors of shape [num_boxes, height_in, width_in] containing instance masks with values in {0, 1}. If None, no masks are provided. Mask resolution `height_in`x`width_in` must agree with the resolution of the input image tensor provided to the `preprocess` function. groundtruth_mask_weights_list: a list of 1-D tf.float32 tensors of shape [num_boxes] with weights for each instance mask. groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of shape [num_boxes, num_keypoints, 2] containing keypoints. Keypoints are assumed to be provided in normalized coordinates and missing keypoints should be encoded as NaN (but it is recommended to use `groundtruth_keypoint_visibilities_list`). groundtruth_keypoint_visibilities_list: a list of 3-D tf.bool tensors of shape [num_boxes, num_keypoints] containing keypoint visibilities. groundtruth_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes] containing the number of DensePose sampled points. groundtruth_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape [num_boxes, max_sampled_points] containing the DensePose part ids (0-indexed) for each sampled point. Note that there may be padding. 
groundtruth_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape [num_boxes, max_sampled_points, 4] containing the DensePose surface coordinates for each sampled point. Note that there may be padding. groundtruth_track_ids_list: a list of 1-D tf.int32 tensors of shape [num_boxes] containing the track IDs of groundtruth objects. groundtruth_temporal_offsets_list: a list of 2-D tf.float32 tensors of shape [num_boxes, 2] containing the spatial offsets of objects' centers compared with the previous frame. groundtruth_track_match_flags_list: a list of 1-D tf.float32 tensors of shape [num_boxes] containing 0-1 flags that indicate if an object has existed in the previous frame. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape [num_boxes, num_classes] containing class confidences for groundtruth boxes. groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape [num_boxes] containing is_crowd annotations. groundtruth_group_of_list: A list of 1-D tf.bool tensors of shape [num_boxes] containing group_of annotations. groundtruth_area_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing the area (in the original absolute coordinates) of the annotations. is_annotated_list: A list of scalar tf.bool tensors indicating whether images have been labeled or not. groundtruth_labeled_classes: A list of 1-D tf.float32 tensors of shape [num_classes], containing label indices encoded as k-hot of the classes that are exhaustively annotated. groundtruth_verified_neg_classes: A list of 1-D tf.float32 tensors of shape [num_classes], containing a K-hot representation of classes which were verified as not present in the image. groundtruth_not_exhaustive_classes: A list of 1-D tf.float32 tensors of shape [num_classes], containing a K-hot representation of classes which don't have all of their instances marked exhaustively. groundtruth_keypoint_depths_list: a list of 2-D tf.float32 tensors of shape [num_boxes, num_keypoints] containing keypoint relative depths. groundtruth_keypoint_depth_weights_list: a list of 2-D tf.float32 tensors of shape [num_boxes, num_keypoints] containing the weights of the relative depths. groundtruth_image_classes: A list of 1-D tf.float32 tensors of shape [num_classes], containing label indices encoded as k-hot of the classes that are present or not present in the image. training_step: An integer denoting the current training step. This is useful when models want to anneal loss terms. """ self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list self._groundtruth_lists[ fields.BoxListFields.classes] = groundtruth_classes_list if groundtruth_weights_list: self._groundtruth_lists[fields.BoxListFields. weights] = groundtruth_weights_list if groundtruth_confidences_list: self._groundtruth_lists[fields.BoxListFields. 
confidences] = groundtruth_confidences_list if groundtruth_masks_list: self._groundtruth_lists[ fields.BoxListFields.masks] = groundtruth_masks_list if groundtruth_mask_weights_list: self._groundtruth_lists[ fields.BoxListFields.mask_weights] = groundtruth_mask_weights_list if groundtruth_keypoints_list: self._groundtruth_lists[ fields.BoxListFields.keypoints] = groundtruth_keypoints_list if groundtruth_keypoint_visibilities_list: self._groundtruth_lists[ fields.BoxListFields.keypoint_visibilities] = ( groundtruth_keypoint_visibilities_list) if groundtruth_keypoint_depths_list: self._groundtruth_lists[ fields.BoxListFields.keypoint_depths] = ( groundtruth_keypoint_depths_list) if groundtruth_keypoint_depth_weights_list: self._groundtruth_lists[ fields.BoxListFields.keypoint_depth_weights] = ( groundtruth_keypoint_depth_weights_list) if groundtruth_dp_num_points_list: self._groundtruth_lists[ fields.BoxListFields.densepose_num_points] = ( groundtruth_dp_num_points_list) if groundtruth_dp_part_ids_list: self._groundtruth_lists[ fields.BoxListFields.densepose_part_ids] = ( groundtruth_dp_part_ids_list) if groundtruth_dp_surface_coords_list: self._groundtruth_lists[ fields.BoxListFields.densepose_surface_coords] = ( groundtruth_dp_surface_coords_list) if groundtruth_track_ids_list: self._groundtruth_lists[ fields.BoxListFields.track_ids] = groundtruth_track_ids_list if groundtruth_temporal_offsets_list: self._groundtruth_lists[ fields.BoxListFields.temporal_offsets] = ( groundtruth_temporal_offsets_list) if groundtruth_track_match_flags_list: self._groundtruth_lists[ fields.BoxListFields.track_match_flags] = ( groundtruth_track_match_flags_list) if groundtruth_is_crowd_list: self._groundtruth_lists[ fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list if groundtruth_group_of_list: self._groundtruth_lists[ fields.BoxListFields.group_of] = groundtruth_group_of_list if groundtruth_area_list: self._groundtruth_lists[ fields.InputDataFields.groundtruth_area] = groundtruth_area_list if is_annotated_list: self._groundtruth_lists[ fields.InputDataFields.is_annotated] = is_annotated_list if groundtruth_labeled_classes: self._groundtruth_lists[ fields.InputDataFields .groundtruth_labeled_classes] = groundtruth_labeled_classes if groundtruth_verified_neg_classes: self._groundtruth_lists[ fields.InputDataFields .groundtruth_verified_neg_classes] = groundtruth_verified_neg_classes if groundtruth_image_classes: self._groundtruth_lists[ fields.InputDataFields .groundtruth_image_classes] = groundtruth_image_classes if groundtruth_not_exhaustive_classes: self._groundtruth_lists[ fields.InputDataFields .groundtruth_not_exhaustive_classes] = ( groundtruth_not_exhaustive_classes) if training_step is not None: self._training_step = training_step @abc.abstractmethod def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ pass @abc.abstractmethod def restore_map(self, fine_tune_checkpoint_type='detection', load_all_detection_checkpoint_vars=False): """Returns a map of variables to load from a foreign checkpoint. Returns a map of variable names to load from a checkpoint to variables in the model graph. This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. 
When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. load_all_detection_checkpoint_vars: whether to load all variables (when `fine_tune_checkpoint_type` is `detection`). If False, only variables within the feature extractor scope are included. Default False. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ pass @abc.abstractmethod def restore_from_objects(self, fine_tune_checkpoint_type='detection'): """Returns a map of variables to load from a foreign checkpoint. Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module or Checkpoint). This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Note that this function is intended to be used to restore Keras-based models when running Tensorflow 2, whereas restore_map (above) is intended to be used to restore Slim-based models when running Tensorflow 1.x. TODO(jonathanhuang,rathodv): Check tf_version and raise unimplemented error for both restore_map and restore_from_objects depending on version. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. Returns: A dict mapping keys to Trackable objects (tf.Module or Checkpoint). """ pass @abc.abstractmethod def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ pass def call(self, images): """Returns detections from a batch of images. This method calls the preprocess, predict and postprocess function sequentially and returns the output. Args: images: a [batch_size, height, width, channels] float tensor. Returns: detetcions: The dict of tensors returned by the postprocess function. """ preprocessed_images, shapes = self.preprocess(images) prediction_dict = self.predict(preprocessed_images, shapes) return self.postprocess(prediction_dict, shapes)
26,202
43.187184
80
py
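A minimal, hypothetical subclass can make the tensor-in/tensor-out contract above concrete: preprocess returns scaled images plus true image shapes, predict returns a prediction dict, and postprocess/loss consume that dict. The sketch below is illustrative only (it "predicts" a single whole-image box and is not a real detector); every abstract method gets a trivial implementation so the class can be instantiated.

import tensorflow.compat.v1 as tf
from object_detection.core import model

class ToyDetectionModel(model.DetectionModel):
  """Trivial DetectionModel that emits one whole-image box per example."""

  def preprocess(self, inputs):
    # Scale pixels to [0, 1]; no resizing or padding, so the true shapes are
    # simply the input shapes repeated per example.
    preprocessed = tf.cast(inputs, tf.float32) / 255.0
    batch = tf.shape(inputs)[0]
    true_image_shapes = tf.tile(tf.shape(inputs)[1:][tf.newaxis, :], [batch, 1])
    return preprocessed, true_image_shapes

  def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
    # A real model would run a feature extractor and box predictor here.
    batch = tf.shape(preprocessed_inputs)[0]
    return {'box_encodings': tf.zeros([batch, 1, 4])}

  def postprocess(self, prediction_dict, true_image_shapes, **params):
    batch = tf.shape(prediction_dict['box_encodings'])[0]
    return {
        'detection_boxes': tf.tile([[[0., 0., 1., 1.]]], [batch, 1, 1]),
        'detection_scores': tf.ones([batch, 1]),
        'detection_classes': tf.zeros([batch, 1]),
        'num_detections': tf.ones([batch]),
    }

  def loss(self, prediction_dict, true_image_shapes):
    return {'Loss/dummy_loss': tf.constant(0.0)}

  def regularization_losses(self):
    return []

  def restore_map(self, fine_tune_checkpoint_type='detection',
                  load_all_detection_checkpoint_vars=False):
    return {}

  def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
    return {}

  def updates(self):
    return []

# The documented call order: preprocess -> predict -> postprocess.
toy = ToyDetectionModel(num_classes=1)
images = tf.zeros([2, 64, 64, 3], tf.float32)
preprocessed, shapes = toy.preprocess(images)
detections = toy.postprocess(toy.predict(preprocessed, shapes), shapes)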
models
models-master/research/object_detection/core/box_list_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Bounding Box List operations. Example box operations that are supported: * areas: compute bounding box areas * iou: pairwise intersection-over-union scores * sq_dist: pairwise distances between bounding boxes Whenever box_list_ops functions output a BoxList, the fields of the incoming BoxList are retained unless documented otherwise. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.utils import ops from object_detection.utils import shape_utils class SortOrder(object): """Enum class for sort order. Attributes: ascend: ascend order. descend: descend order. """ ascend = 1 descend = 2 def area(boxlist, scope=None): """Computes area of boxes. Args: boxlist: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing box areas. """ with tf.name_scope(scope, 'Area'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) def height_width(boxlist, scope=None): """Computes height and width of boxes in boxlist. Args: boxlist: BoxList holding N boxes scope: name scope. Returns: Height: A tensor with shape [N] representing box heights. Width: A tensor with shape [N] representing box widths. """ with tf.name_scope(scope, 'HeightWidth'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1]) def scale(boxlist, y_scale, x_scale, scope=None): """scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = box_list.BoxList( tf.concat([y_min, x_min, y_max, x_max], 1)) return _copy_extra_fields(scaled_boxlist, boxlist) def scale_height_width(boxlist, y_scale, x_scale, scope=None): """Scale the height and width of boxes, leaving centers unchanged. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. 
Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'ScaleHeightWidth'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) yc, xc, height_orig, width_orig = boxlist.get_center_coordinates_and_sizes() y_min = yc - 0.5 * y_scale * height_orig y_max = yc + 0.5 * y_scale * height_orig x_min = xc - 0.5 * x_scale * width_orig x_max = xc + 0.5 * x_scale * width_orig scaled_boxlist = box_list.BoxList( tf.stack([y_min, x_min, y_max, x_max], 1)) return _copy_extra_fields(scaled_boxlist, boxlist) def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None): """Clip bounding boxes to a window. This op clips any input bounding boxes (represented by bounding box corners) to a window, optionally filtering out boxes that do not overlap at all with the window. Args: boxlist: BoxList holding M_in boxes window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip boxes. filter_nonoverlapping: whether to filter out boxes that do not overlap at all with the window. scope: name scope. Returns: a BoxList holding M_out boxes where M_out <= M_in """ with tf.name_scope(scope, 'ClipToWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min = window[0] win_x_min = window[1] win_y_max = window[2] win_x_max = window[3] y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min) y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min) x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min) x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min) clipped = box_list.BoxList( tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped], 1)) clipped = _copy_extra_fields(clipped, boxlist) if filter_nonoverlapping: areas = area(clipped) nonzero_area_indices = tf.cast( tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32) clipped = gather(clipped, nonzero_area_indices) return clipped def prune_outside_window(boxlist, window, scope=None): """Prunes bounding boxes that fall outside a given window. This function prunes bounding boxes that even partially fall outside the given window. See also clip_to_window which only prunes bounding boxes that fall completely outside the window, and clips any bounding boxes that partially overflow. Args: boxlist: a BoxList holding M_in boxes. window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of the window scope: name scope. Returns: pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes in the input tensor. """ with tf.name_scope(scope, 'PruneOutsideWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) coordinate_violations = tf.concat([ tf.less(y_min, win_y_min), tf.less(x_min, win_x_min), tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max) ], 1) valid_indices = tf.reshape( tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) return gather(boxlist, valid_indices), valid_indices def prune_completely_outside_window(boxlist, window, scope=None): """Prunes bounding boxes that fall completely outside of the given window. The function clip_to_window prunes bounding boxes that fall completely outside the window, but also clips any bounding boxes that partially overflow. This function does not clip partially overflowing boxes. 
Args: boxlist: a BoxList holding M_in boxes. window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of the window scope: name scope. Returns: pruned_boxlist: a new BoxList with all bounding boxes partially or fully in the window. valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes in the input tensor. """ with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) coordinate_violations = tf.concat([ tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min) ], 1) valid_indices = tf.reshape( tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) return gather(boxlist, valid_indices), valid_indices def intersection(boxlist1, boxlist2, scope=None): """Compute pairwise intersection areas between boxes. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise intersections """ with tf.name_scope(scope, 'Intersection'): y_min1, x_min1, y_max1, x_max1 = tf.split( value=boxlist1.get(), num_or_size_splits=4, axis=1) y_min2, x_min2, y_max2, x_max2 = tf.split( value=boxlist2.get(), num_or_size_splits=4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) return intersect_heights * intersect_widths def matched_intersection(boxlist1, boxlist2, scope=None): """Compute intersection areas between corresponding boxes in two boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing pairwise intersections """ with tf.name_scope(scope, 'MatchedIntersection'): y_min1, x_min1, y_max1, x_max1 = tf.split( value=boxlist1.get(), num_or_size_splits=4, axis=1) y_min2, x_min2, y_max2, x_max2 = tf.split( value=boxlist2.get(), num_or_size_splits=4, axis=1) min_ymax = tf.minimum(y_max1, y_max2) max_ymin = tf.maximum(y_min1, y_min2) intersect_heights = tf.maximum(0.0, min_ymax - max_ymin) min_xmax = tf.minimum(x_max1, x_max2) max_xmin = tf.maximum(x_min1, x_min2) intersect_widths = tf.maximum(0.0, min_xmax - max_xmin) return tf.reshape(intersect_heights * intersect_widths, [-1]) def iou(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise iou scores. """ with tf.name_scope(scope, 'IOU'): intersections = intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = ( tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) return tf.where( tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) def l1(boxlist1, boxlist2, scope=None): """Computes l1 loss (pairwise) between two boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. 
Returns: a tensor with shape [N, M] representing the pairwise L1 loss. """ with tf.name_scope(scope, 'PairwiseL1'): ycenter1, xcenter1, h1, w1 = boxlist1.get_center_coordinates_and_sizes() ycenter2, xcenter2, h2, w2 = boxlist2.get_center_coordinates_and_sizes() ycenters = tf.abs(tf.expand_dims(ycenter2, axis=0) - tf.expand_dims( tf.transpose(ycenter1), axis=1)) xcenters = tf.abs(tf.expand_dims(xcenter2, axis=0) - tf.expand_dims( tf.transpose(xcenter1), axis=1)) heights = tf.abs(tf.expand_dims(h2, axis=0) - tf.expand_dims( tf.transpose(h1), axis=1)) widths = tf.abs(tf.expand_dims(w2, axis=0) - tf.expand_dims( tf.transpose(w1), axis=1)) return ycenters + xcenters + heights + widths def giou(boxlist1, boxlist2, scope=None): """Computes pairwise generalized IOU between two boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing the pairwise GIoU loss. """ with tf.name_scope(scope, 'PairwiseGIoU'): n = boxlist1.num_boxes() m = boxlist2.num_boxes() boxes1 = tf.repeat(boxlist1.get(), repeats=m, axis=0) boxes2 = tf.tile(boxlist2.get(), multiples=[n, 1]) return tf.reshape(ops.giou(boxes1, boxes2), [n, m]) def matched_iou(boxlist1, boxlist2, scope=None): """Compute intersection-over-union between corresponding boxes in boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing pairwise iou scores. """ with tf.name_scope(scope, 'MatchedIOU'): intersections = matched_intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = areas1 + areas2 - intersections return tf.where( tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) def ioa(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-area between box collections. intersection-over-area (IOA) between two boxes box1 and box2 is defined as their intersection area over box2's area. Note that ioa is not symmetric, that is, ioa(box1, box2) != ioa(box2, box1). Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise ioa scores. """ with tf.name_scope(scope, 'IOA'): intersections = intersection(boxlist1, boxlist2) areas = tf.expand_dims(area(boxlist2), 0) return tf.truediv(intersections, areas) def prune_non_overlapping_boxes( boxlist1, boxlist2, min_overlap=0.0, scope=None): """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. For each box in boxlist1, we want its IOA to be more than minoverlap with at least one of the boxes in boxlist2. If it does not, we remove it. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. min_overlap: Minimum required overlap between boxes, to count them as overlapping. scope: name scope. Returns: new_boxlist1: A pruned boxlist with size [N', 4]. keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the first input BoxList `boxlist1`. 
""" with tf.name_scope(scope, 'PruneNonOverlappingBoxes'): ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap)) keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1]) new_boxlist1 = gather(boxlist1, keep_inds) return new_boxlist1, keep_inds def prune_small_boxes(boxlist, min_side, scope=None): """Prunes small boxes in the boxlist which have a side smaller than min_side. Args: boxlist: BoxList holding N boxes. min_side: Minimum width AND height of box to survive pruning. scope: name scope. Returns: A pruned boxlist. """ with tf.name_scope(scope, 'PruneSmallBoxes'): height, width = height_width(boxlist) is_valid = tf.logical_and(tf.greater_equal(width, min_side), tf.greater_equal(height, min_side)) return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) def change_coordinate_frame(boxlist, window, scope=None): """Change coordinate frame of the boxlist to be relative to window's frame. Given a window of the form [ymin, xmin, ymax, xmax], changes bounding box coordinates from boxlist to be relative to this window (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). An example use case is data augmentation: where we are given groundtruth boxes (boxlist) and would like to randomly crop the image to some window (window). In this case we need to change the coordinate frame of each groundtruth box to be relative to this new window. Args: boxlist: A BoxList object holding N boxes. window: A rank 1 tensor [4]. scope: name scope. Returns: Returns a BoxList object with N boxes. """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] boxlist_new = scale(box_list.BoxList( boxlist.get() - [window[0], window[1], window[0], window[1]]), 1.0 / win_height, 1.0 / win_width) boxlist_new = _copy_extra_fields(boxlist_new, boxlist) return boxlist_new def sq_dist(boxlist1, boxlist2, scope=None): """Computes the pairwise squared distances between box corners. This op treats each box as if it were a point in a 4d Euclidean space and computes pairwise squared distances. Mathematically, we are given two matrices of box coordinates X and Y, where X(i,:) is the i'th row of X, containing the 4 numbers defining the corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to boxlist2. We compute Z(i,j) = ||X(i,:) - Y(j,:)||^2 = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:), Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise distances """ with tf.name_scope(scope, 'SqDist'): sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True) sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True) innerprod = tf.matmul(boxlist1.get(), boxlist2.get(), transpose_a=False, transpose_b=True) return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod def boolean_mask(boxlist, indicator, fields=None, scope=None, use_static_shapes=False, indicator_sum=None): """Select boxes from BoxList according to indicator and return new BoxList. `boolean_mask` returns the subset of boxes that are marked as "True" by the indicator tensor. By default, `boolean_mask` returns boxes corresponding to the input index list, as well as all additional fields stored in the boxlist (indexing into the first dimension). However one can optionally only draw from a subset of fields. 
Args: boxlist: BoxList holding N boxes indicator: a rank-1 boolean tensor fields: (optional) list of fields to also gather from. If None (default), all fields are gathered from. Pass an empty fields list to only gather the box coordinates. scope: name scope. use_static_shapes: Whether to use an implementation with static shape guarantees. indicator_sum: An integer containing the sum of `indicator` vector. Only required if `use_static_shapes` is True. Returns: subboxlist: a BoxList corresponding to the subset of the input BoxList specified by indicator Raises: ValueError: if `indicator` is not a rank-1 boolean tensor. """ with tf.name_scope(scope, 'BooleanMask'): if indicator.shape.ndims != 1: raise ValueError('indicator should have rank 1') if indicator.dtype != tf.bool: raise ValueError('indicator should be a boolean tensor') if use_static_shapes: if not (indicator_sum and isinstance(indicator_sum, int)): raise ValueError('`indicator_sum` must be of type int') selected_positions = tf.cast(indicator, dtype=tf.float32) indexed_positions = tf.cast( tf.multiply( tf.cumsum(selected_positions), selected_positions), dtype=tf.int32) one_hot_selector = tf.one_hot( indexed_positions - 1, indicator_sum, dtype=tf.float32) sampled_indices = tf.cast( tf.tensordot( tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32), one_hot_selector, axes=[0, 0]), dtype=tf.int32) return gather(boxlist, sampled_indices, use_static_shapes=True) else: subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator)) if fields is None: fields = boxlist.get_extra_fields() for field in fields: if not boxlist.has_field(field): raise ValueError('boxlist must contain all specified fields') subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator) subboxlist.add_field(field, subfieldlist) return subboxlist def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False): """Gather boxes from BoxList according to indices and return new BoxList. By default, `gather` returns boxes corresponding to the input index list, as well as all additional fields stored in the boxlist (indexing into the first dimension). However one can optionally only gather from a subset of fields. Args: boxlist: BoxList holding N boxes indices: a rank-1 tensor of type int32 / int64 fields: (optional) list of fields to also gather from. If None (default), all fields are gathered from. Pass an empty fields list to only gather the box coordinates. scope: name scope. use_static_shapes: Whether to use an implementation with static shape guarantees.
Returns: subboxlist: a BoxList corresponding to the subset of the input BoxList specified by indices Raises: ValueError: if specified field is not contained in boxlist or if the indices are not of type int32 """ with tf.name_scope(scope, 'Gather'): if len(indices.shape.as_list()) != 1: raise ValueError('indices should have rank 1') if indices.dtype != tf.int32 and indices.dtype != tf.int64: raise ValueError('indices should be an int32 / int64 tensor') gather_op = tf.gather if use_static_shapes: gather_op = ops.matmul_gather_on_zeroth_axis subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices)) if fields is None: fields = boxlist.get_extra_fields() fields += ['boxes'] for field in fields: if not boxlist.has_field(field): raise ValueError('boxlist must contain all specified fields') subfieldlist = gather_op(boxlist.get_field(field), indices) subboxlist.add_field(field, subfieldlist) return subboxlist def concatenate(boxlists, fields=None, scope=None): """Concatenate list of BoxLists. This op concatenates a list of input BoxLists into a larger BoxList. It also handles concatenation of BoxList fields as long as the field tensor shapes are equal except for the first dimension. Args: boxlists: list of BoxList objects fields: optional list of fields to also concatenate. By default, all fields from the first BoxList in the list are included in the concatenation. scope: name scope. Returns: a BoxList with number of boxes equal to sum([boxlist.num_boxes() for boxlist in BoxList]) Raises: ValueError: if boxlists is invalid (i.e., is not a list, is empty, or contains non BoxList objects), or if requested fields are not contained in all boxlists """ with tf.name_scope(scope, 'Concatenate'): if not isinstance(boxlists, list): raise ValueError('boxlists should be a list') if not boxlists: raise ValueError('boxlists should have nonzero length') for boxlist in boxlists: if not isinstance(boxlist, box_list.BoxList): raise ValueError('all elements of boxlists should be BoxList objects') concatenated = box_list.BoxList( tf.concat([boxlist.get() for boxlist in boxlists], 0)) if fields is None: fields = boxlists[0].get_extra_fields() for field in fields: first_field_shape = boxlists[0].get_field(field).get_shape().as_list() first_field_shape[0] = -1 if None in first_field_shape: raise ValueError('field %s must have fully defined shape except for the' ' 0th dimension.' % field) for boxlist in boxlists: if not boxlist.has_field(field): raise ValueError('boxlist must contain all requested fields') field_shape = boxlist.get_field(field).get_shape().as_list() field_shape[0] = -1 if field_shape != first_field_shape: raise ValueError('field %s must have same shape for all boxlists ' 'except for the 0th dimension.' % field) concatenated_field = tf.concat( [boxlist.get_field(field) for boxlist in boxlists], 0) concatenated.add_field(field, concatenated_field) return concatenated def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None): """Sort boxes and associated fields according to a scalar field. A common use case is reordering the boxes according to descending scores. Args: boxlist: BoxList holding N boxes. field: A BoxList field for sorting and reordering the BoxList. order: (Optional) descend or ascend. Default is descend. scope: name scope. Returns: sorted_boxlist: A sorted BoxList with the field in the specified order. 
Raises: ValueError: if specified field does not exist ValueError: if the order is not either descend or ascend """ with tf.name_scope(scope, 'SortByField'): if order != SortOrder.descend and order != SortOrder.ascend: raise ValueError('Invalid sort order') field_to_sort = boxlist.get_field(field) if len(field_to_sort.shape.as_list()) != 1: raise ValueError('Field should have rank 1') num_boxes = boxlist.num_boxes() num_entries = tf.size(field_to_sort) length_assert = tf.Assert( tf.equal(num_boxes, num_entries), ['Incorrect field size: actual vs expected.', num_entries, num_boxes]) with tf.control_dependencies([length_assert]): _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True) if order == SortOrder.ascend: sorted_indices = tf.reverse_v2(sorted_indices, [0]) return gather(boxlist, sorted_indices) def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None): """Overlay bounding box list on image. Currently this visualization plots a 1 pixel thick red bounding box on top of the image. Note that tf.image.draw_bounding_boxes essentially is 1 indexed. Args: image: an image tensor with shape [height, width, 3] boxlist: a BoxList normalized: (boolean) specify whether corners are to be interpreted as absolute coordinates in image space or normalized with respect to the image size. scope: name scope. Returns: image_and_boxes: an image tensor with shape [height, width, 3] """ with tf.name_scope(scope, 'VisualizeBoxesInImage'): if not normalized: height, width, _ = tf.unstack(tf.shape(image)) boxlist = scale(boxlist, 1.0 / tf.cast(height, tf.float32), 1.0 / tf.cast(width, tf.float32)) corners = tf.expand_dims(boxlist.get(), 0) image = tf.expand_dims(image, 0) return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0]) def filter_field_value_equals(boxlist, field, value, scope=None): """Filter to keep only boxes with field entries equal to the given value. Args: boxlist: BoxList holding N boxes. field: field name for filtering. value: scalar value. scope: name scope. Returns: a BoxList holding M boxes where M <= N Raises: ValueError: if boxlist not a BoxList object or if it does not have the specified field. """ with tf.name_scope(scope, 'FilterFieldValueEquals'): if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field(field): raise ValueError('boxlist must contain the specified field') filter_field = boxlist.get_field(field) gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1]) return gather(boxlist, gather_index) def filter_greater_than(boxlist, thresh, scope=None): """Filter to keep only boxes with score exceeding a given threshold. This op keeps the collection of boxes whose corresponding scores are greater than the input threshold. TODO(jonathanhuang): Change function name to filter_scores_greater_than Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. thresh: scalar threshold scope: name scope. 
Returns: a BoxList holding M boxes where M <= N Raises: ValueError: if boxlist not a BoxList object or if it does not have a scores field """ with tf.name_scope(scope, 'FilterGreaterThan'): if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field('scores'): raise ValueError('input boxlist must have \'scores\' field') scores = boxlist.get_field('scores') if len(scores.shape.as_list()) > 2: raise ValueError('Scores should have rank 1 or 2') if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1: raise ValueError('Scores should have rank 1 or have shape ' 'consistent with [None, 1]') high_score_indices = tf.cast(tf.reshape( tf.where(tf.greater(scores, thresh)), [-1]), tf.int32) return gather(boxlist, high_score_indices) def non_max_suppression(boxlist, thresh, max_output_size, scope=None): """Non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. Note that this only works for a single class --- to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression. Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. thresh: scalar threshold max_output_size: maximum number of retained boxes scope: name scope. Returns: a BoxList holding M boxes where M <= max_output_size Raises: ValueError: if thresh is not in [0, 1] """ with tf.name_scope(scope, 'NonMaxSuppression'): if not 0 <= thresh <= 1.0: raise ValueError('thresh must be between 0 and 1') if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field('scores'): raise ValueError('input boxlist must have \'scores\' field') selected_indices = tf.image.non_max_suppression( boxlist.get(), boxlist.get_field('scores'), max_output_size, iou_threshold=thresh) return gather(boxlist, selected_indices) def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. Args: boxlist_to_copy_to: BoxList to which extra fields are copied. boxlist_to_copy_from: BoxList from which fields are copied. Returns: boxlist_to_copy_to with extra fields. """ for field in boxlist_to_copy_from.get_extra_fields(): boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) return boxlist_to_copy_to def to_normalized_coordinates(boxlist, height, width, check_range=True, scope=None): """Converts absolute box coordinates to normalized coordinates in [0, 1]. Usually one uses the dynamic shape of the image or conv-layer tensor: boxlist = box_list_ops.to_normalized_coordinates(boxlist, tf.shape(images)[1], tf.shape(images)[2]), This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: boxlist: BoxList with coordinates in terms of pixel-locations. height: Maximum value for height of absolute box coordinates. width: Maximum value for width of absolute box coordinates. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: boxlist with normalized coordinates in [0, 1]. 
""" with tf.name_scope(scope, 'ToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(boxlist.get()) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(boxlist, 1 / height, 1 / width) def to_absolute_coordinates(boxlist, height, width, check_range=True, maximum_normalized_coordinate=1.1, scope=None): """Converts normalized box coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum box coordinate value is larger than maximum_normalized_coordinate (in which case coordinates are already absolute). Args: boxlist: BoxList with coordinates in range [0, 1]. height: Maximum value for height of absolute box coordinates. width: Maximum value for width of absolute box coordinates. check_range: If True, checks if the coordinates are normalized or not. maximum_normalized_coordinate: Maximum coordinate value to be considered as normalized, default to 1.1. scope: name scope. Returns: boxlist with absolute coordinates in terms of the image size. """ with tf.name_scope(scope, 'ToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) # Ensure range of input boxes is correct. if check_range: box_maximum = tf.reduce_max(boxlist.get()) max_assert = tf.Assert( tf.greater_equal(maximum_normalized_coordinate, box_maximum), ['maximum box coordinate value is larger ' 'than %f: ' % maximum_normalized_coordinate, box_maximum]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(boxlist, height, width) def refine_boxes_multi_class(pool_boxes, num_classes, nms_iou_thresh, nms_max_detections, voting_iou_thresh=0.5): """Refines a pool of boxes using non max suppression and box voting. Box refinement is done independently for each class. Args: pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must have a rank 1 'scores' field and a rank 1 'classes' field. num_classes: (int scalar) Number of classes. nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). nms_max_detections: (int scalar) maximum output size for NMS. voting_iou_thresh: (float scalar) iou threshold for box voting. Returns: BoxList of refined boxes. Raises: ValueError: if a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. b) pool_boxes is not a BoxList. c) pool_boxes does not have a scores and classes field. """ if not 0.0 <= nms_iou_thresh <= 1.0: raise ValueError('nms_iou_thresh must be between 0 and 1') if not 0.0 <= voting_iou_thresh <= 1.0: raise ValueError('voting_iou_thresh must be between 0 and 1') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') if not pool_boxes.has_field('classes'): raise ValueError('pool_boxes must have a \'classes\' field') refined_boxes = [] for i in range(num_classes): boxes_class = filter_field_value_equals(pool_boxes, 'classes', i) refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh, nms_max_detections, voting_iou_thresh) refined_boxes.append(refined_boxes_class) return sort_by_field(concatenate(refined_boxes), 'scores') def refine_boxes(pool_boxes, nms_iou_thresh, nms_max_detections, voting_iou_thresh=0.5): """Refines a pool of boxes using non max suppression and box voting. 
Args: pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must have a rank 1 'scores' field. nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). nms_max_detections: (int scalar) maximum output size for NMS. voting_iou_thresh: (float scalar) iou threshold for box voting. Returns: BoxList of refined boxes. Raises: ValueError: if a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. b) pool_boxes is not a BoxList. c) pool_boxes does not have a scores field. """ if not 0.0 <= nms_iou_thresh <= 1.0: raise ValueError('nms_iou_thresh must be between 0 and 1') if not 0.0 <= voting_iou_thresh <= 1.0: raise ValueError('voting_iou_thresh must be between 0 and 1') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') nms_boxes = non_max_suppression( pool_boxes, nms_iou_thresh, nms_max_detections) return box_voting(nms_boxes, pool_boxes, voting_iou_thresh) def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5): """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015. Performs box voting as described in 'Object detection via a multi-region & semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes with iou overlap >= iou_thresh. The location of B is set to the weighted average location of boxes in S (scores are used for weighting). And the score of B is set to the average score of boxes in S. Args: selected_boxes: BoxList containing a subset of boxes in pool_boxes. These boxes are usually selected from pool_boxes using non max suppression. pool_boxes: BoxList containing a set of (possibly redundant) boxes. iou_thresh: (float scalar) iou threshold for matching boxes in selected_boxes and pool_boxes. Returns: BoxList containing averaged locations and scores for each box in selected_boxes. Raises: ValueError: if a) selected_boxes or pool_boxes is not a BoxList. b) if iou_thresh is not in [0, 1]. c) pool_boxes does not have a scores field. """ if not 0.0 <= iou_thresh <= 1.0: raise ValueError('iou_thresh must be between 0 and 1') if not isinstance(selected_boxes, box_list.BoxList): raise ValueError('selected_boxes must be a BoxList') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') iou_ = iou(selected_boxes, pool_boxes) match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32) num_matches = tf.reduce_sum(match_indicator, 1) # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not # match to any boxes in pool_boxes. For such boxes without any matches, we # should return the original boxes without voting. 
match_assert = tf.Assert( tf.reduce_all(tf.greater(num_matches, 0)), ['Each box in selected_boxes must match with at least one box ' 'in pool_boxes.']) scores = tf.expand_dims(pool_boxes.get_field('scores'), 1) scores_assert = tf.Assert( tf.reduce_all(tf.greater_equal(scores, 0)), ['Scores must be non negative.']) with tf.control_dependencies([scores_assert, match_assert]): sum_scores = tf.matmul(match_indicator, scores) averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches box_locations = tf.matmul(match_indicator, pool_boxes.get() * scores) / sum_scores averaged_boxes = box_list.BoxList(box_locations) _copy_extra_fields(averaged_boxes, selected_boxes) averaged_boxes.add_field('scores', averaged_scores) return averaged_boxes def pad_or_clip_box_list(boxlist, num_boxes, scope=None): """Pads or clips all fields of a BoxList. Args: boxlist: A BoxList with an arbitrary number of boxes. num_boxes: First num_boxes in boxlist are kept. The fields are zero-padded if num_boxes is bigger than the actual number of boxes. scope: name scope. Returns: BoxList with all fields padded or clipped. """ with tf.name_scope(scope, 'PadOrClipBoxList'): subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor( boxlist.get(), num_boxes)) for field in boxlist.get_extra_fields(): subfield = shape_utils.pad_or_clip_tensor( boxlist.get_field(field), num_boxes) subboxlist.add_field(field, subfield) return subboxlist def select_random_box(boxlist, default_box=None, seed=None, scope=None): """Selects a random bounding box from a `BoxList`. Args: boxlist: A BoxList. default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, this default box will be returned. If None, will use a default box of [[-1., -1., -1., -1.]]. seed: Random seed. scope: Name scope. Returns: bbox: A [1, 4] tensor with a random bounding box. valid: A bool tensor indicating whether a valid bounding box is returned (True) or whether the default box is returned (False). """ with tf.name_scope(scope, 'SelectRandomBox'): bboxes = boxlist.get() combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes) number_of_boxes = combined_shape[0] default_box = default_box or tf.constant([[-1., -1., -1., -1.]]) def select_box(): random_index = tf.random_uniform([], maxval=number_of_boxes, dtype=tf.int32, seed=seed) return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True) return tf.cond( tf.greater_equal(number_of_boxes, 1), true_fn=select_box, false_fn=lambda: (default_box, tf.constant(False))) def get_minimal_coverage_box(boxlist, default_box=None, scope=None): """Creates a single bounding box which covers all boxes in the boxlist. Args: boxlist: A BoxList. default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, this default box will be returned. If None, will use a default box of [[0., 0., 1., 1.]]. scope: Name scope. Returns: A [1, 4] float32 tensor with a bounding box that tightly covers all the boxes in the box list. If the boxlist does not contain any boxes, the default box is returned.
""" with tf.name_scope(scope, 'CreateCoverageBox'): num_boxes = boxlist.num_boxes() def coverage_box(bboxes): y_min, x_min, y_max, x_max = tf.split( value=bboxes, num_or_size_splits=4, axis=1) y_min_coverage = tf.reduce_min(y_min, axis=0) x_min_coverage = tf.reduce_min(x_min, axis=0) y_max_coverage = tf.reduce_max(y_max, axis=0) x_max_coverage = tf.reduce_max(x_max, axis=0) return tf.stack( [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage], axis=1) default_box = default_box or tf.constant([[0., 0., 1., 1.]]) return tf.cond( tf.greater_equal(num_boxes, 1), true_fn=lambda: coverage_box(boxlist.get()), false_fn=lambda: default_box) def sample_boxes_by_jittering(boxlist, num_boxes_to_sample, stddev=0.1, scope=None): """Samples num_boxes_to_sample boxes by jittering around boxlist boxes. It is possible that this function might generate boxes with size 0. The larger the stddev, this is more probable. For a small stddev of 0.1 this probability is very small. Args: boxlist: A boxlist containing N boxes in normalized coordinates. num_boxes_to_sample: A positive integer containing the number of boxes to sample. stddev: Standard deviation. This is used to draw random offsets for the box corners from a normal distribution. The offset is multiplied by the box size so will be larger in terms of pixels for larger boxes. scope: Name scope. Returns: sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in normalized coordinates. """ with tf.name_scope(scope, 'SampleBoxesByJittering'): num_boxes = boxlist.num_boxes() box_indices = tf.random_uniform( [num_boxes_to_sample], minval=0, maxval=num_boxes, dtype=tf.int32) sampled_boxes = tf.gather(boxlist.get(), box_indices) sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0] sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1] rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0] minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1] maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2] maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3] maxy = tf.maximum(miny, maxy) maxx = tf.maximum(minx, maxx) sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1) sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0) return box_list.BoxList(sampled_boxes)
46,338
37.170511
80
py
models
models-master/research/object_detection/core/freezable_sync_batch_norm.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A freezable batch norm layer that uses Keras sync batch normalization.""" import tensorflow as tf class FreezableSyncBatchNorm(tf.keras.layers.experimental.SyncBatchNormalization ): """Sync Batch normalization layer (Ioffe and Szegedy, 2014). This is a `freezable` batch norm layer that supports setting the `training` parameter in the __init__ method rather than having to set it either via the Keras learning phase or via the `call` method parameter. This layer will forward all other parameters to the Keras `SyncBatchNormalization` layer. This class is necessary because Object Detection model training sometimes requires batch normalization layers to be `frozen` and used as if it was evaluation time, despite still training (and potentially using dropout layers). Like the default Keras SyncBatchNormalization layer, this will normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167) """ def __init__(self, training=None, **kwargs): """Constructor. Args: training: If False, the layer will normalize using the moving average and std. dev, without updating the learned avg and std. dev. If None or True, the layer will follow the keras SyncBatchNormalization layer strategy of checking the Keras learning phase at `call` time to decide what to do. **kwargs: The keyword arguments to forward to the keras SyncBatchNormalization layer constructor. """ super(FreezableSyncBatchNorm, self).__init__(**kwargs) self._training = training def call(self, inputs, training=None): # Override the call arg only if the batchnorm is frozen. (Ignore None) if self._training is False: # pylint: disable=g-bool-id-comparison training = self._training return super(FreezableSyncBatchNorm, self).call(inputs, training=training)
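A brief, illustrative sketch of the frozen behaviour described above (not part of the file; it assumes the module is importable as object_detection.core.freezable_sync_batch_norm and reuses the constructor arguments exercised in the test file later in this dump). Constructing the layer with training=False makes it normalize with its moving statistics even when the enclosing model is called with training=True:

import tensorflow as tf

from object_detection.core import freezable_sync_batch_norm

# Batch statistics stay frozen regardless of the `training` flag used at call time.
frozen_bn = freezable_sync_batch_norm.FreezableSyncBatchNorm(
    training=False, input_shape=(10,), momentum=0.8)

model = tf.keras.models.Sequential([frozen_bn])
outputs = model(tf.random.normal([4, 10]), training=True)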
3,093
42.577465
80
py
models
models-master/research/object_detection/core/freezable_batch_norm_tf2_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.freezable_batch_norm.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import numpy as np from six.moves import zip import tensorflow as tf from object_detection.core import freezable_batch_norm from object_detection.utils import tf_version # pylint: disable=g-import-not-at-top if tf_version.is_tf2(): from object_detection.core import freezable_sync_batch_norm # pylint: enable=g-import-not-at-top @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class FreezableBatchNormTest(tf.test.TestCase, parameterized.TestCase): """Tests for FreezableBatchNorm operations.""" def _build_model(self, use_sync_batch_norm, training=None): model = tf.keras.models.Sequential() norm = None if use_sync_batch_norm: norm = freezable_sync_batch_norm.FreezableSyncBatchNorm(training=training, input_shape=(10,), momentum=0.8) else: norm = freezable_batch_norm.FreezableBatchNorm(training=training, input_shape=(10,), momentum=0.8) model.add(norm) return model, norm def _copy_weights(self, source_weights, target_weights): for source, target in zip(source_weights, target_weights): target.assign(source) def _train_freezable_batch_norm(self, training_mean, training_var, use_sync_batch_norm): model, _ = self._build_model(use_sync_batch_norm=use_sync_batch_norm) model.compile(loss='mse', optimizer='sgd') # centered on training_mean, variance training_var train_data = np.random.normal( loc=training_mean, scale=training_var, size=(1000, 10)) model.fit(train_data, train_data, epochs=4, verbose=0) return model.weights def _test_batchnorm_layer( self, norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var): out_tensor = norm(tf.convert_to_tensor(test_data, dtype=tf.float32), training=training_arg) out = out_tensor out -= norm.beta out /= norm.gamma if not should_be_training: out *= training_var out += (training_mean - testing_mean) out /= testing_var np.testing.assert_allclose(out.numpy().mean(), 0.0, atol=1.5e-1) np.testing.assert_allclose(out.numpy().std(), 1.0, atol=1.5e-1) @parameterized.parameters(True, False) def test_batchnorm_freezing_training_none(self, use_sync_batch_norm): training_mean = 5.0 training_var = 10.0 testing_mean = -10.0 testing_var = 5.0 # Initially train the batch norm, and save the weights trained_weights = self._train_freezable_batch_norm(training_mean, training_var, use_sync_batch_norm) # Load the batch norm weights, freezing training to True. # Apply the batch norm layer to testing data and ensure it is normalized # according to the batch statistics. 
model, norm = self._build_model(use_sync_batch_norm, training=True) self._copy_weights(trained_weights, model.weights) # centered on testing_mean, variance testing_var test_data = np.random.normal( loc=testing_mean, scale=testing_var, size=(1000, 10)) # Test with training=True passed to the call method: training_arg = True should_be_training = True self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) # Reset the weights, because they may have been updating by # running with training=True self._copy_weights(trained_weights, model.weights) # Test with training=False passed to the call method: training_arg = False should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) # Test the layer in various Keras learning phase scopes: training_arg = None should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) tf.keras.backend.set_learning_phase(True) should_be_training = True self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) # Reset the weights, because they may have been updating by # running with training=True self._copy_weights(trained_weights, model.weights) tf.keras.backend.set_learning_phase(False) should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) @parameterized.parameters(True, False) def test_batchnorm_freezing_training_false(self, use_sync_batch_norm): training_mean = 5.0 training_var = 10.0 testing_mean = -10.0 testing_var = 5.0 # Initially train the batch norm, and save the weights trained_weights = self._train_freezable_batch_norm(training_mean, training_var, use_sync_batch_norm) # Load the batch norm back up, freezing training to False. # Apply the batch norm layer to testing data and ensure it is normalized # according to the training data's statistics. 
model, norm = self._build_model(use_sync_batch_norm, training=False) self._copy_weights(trained_weights, model.weights) # centered on testing_mean, variance testing_var test_data = np.random.normal( loc=testing_mean, scale=testing_var, size=(1000, 10)) # Make sure that the layer is never training # Test with training=True passed to the call method: training_arg = True should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) # Test with training=False passed to the call method: training_arg = False should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) # Test the layer in various Keras learning phase scopes: training_arg = None should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) tf.keras.backend.set_learning_phase(True) should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) tf.keras.backend.set_learning_phase(False) should_be_training = False self._test_batchnorm_layer(norm, should_be_training, test_data, testing_mean, testing_var, training_arg, training_mean, training_var) if __name__ == '__main__': tf.test.main()
8,681
38.643836
80
py
models
models-master/research/object_detection/core/class_agnostic_nms_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for google3.third_party.tensorflow_models.object_detection.core.class_agnostic_nms.""" from absl.testing import parameterized import tensorflow.compat.v1 as tf from object_detection.core import post_processing from object_detection.core import standard_fields as fields from object_detection.utils import test_case class ClassAgnosticNonMaxSuppressionTest(test_case.TestCase, parameterized.TestCase): def test_class_agnostic_nms_select_with_shared_boxes(self): def graph_fn(): boxes = tf.constant( [[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]], tf.float32) scores = tf.constant([[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]) score_thresh = 0.1 iou_thresh = .5 max_classes_per_detection = 1 max_output_size = 4 nms, _ = post_processing.class_agnostic_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, max_output_size) return (nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes)) exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 0, 1, 0] (nms_corners_output, nms_scores_output, nms_classes_output) = self.execute_cpu(graph_fn, []) self.assertAllClose(nms_corners_output, exp_nms_corners) self.assertAllClose(nms_scores_output, exp_nms_scores) self.assertAllClose(nms_classes_output, exp_nms_classes) def test_class_agnostic_nms_select_with_per_class_boxes(self): def graph_fn(): boxes = tf.constant( [[[4, 5, 9, 10], [0, 0, 1, 1]], [[0, 0.1, 1, 1.1], [4, 5, 9, 10]], [[0, -0.1, 1, 0.9], [4, 5, 9, 10]], [[0, 10, 1, 11], [4, 5, 9, 10]], [[0, 10.1, 1, 11.1], [4, 5, 9, 10]], [[0, 100, 1, 101], [4, 5, 9, 10]], [[4, 5, 9, 10], [0, 1000, 1, 1002]], [[4, 5, 9, 10], [0, 1000, 1, 1002.1]]], tf.float32) scores = tf.constant([[.01, 0.9], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]) score_thresh = 0.1 iou_thresh = .5 max_classes_per_detection = 1 max_output_size = 4 nms, _ = post_processing.class_agnostic_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_classes_per_detection, max_output_size) return (nms.get(), nms.get_field(fields.BoxListFields.scores), nms.get_field(fields.BoxListFields.classes)) (nms_corners_output, nms_scores_output, nms_classes_output) = self.execute_cpu(graph_fn, []) exp_nms_corners = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]] exp_nms_scores = [.95, .9, .85, .3] exp_nms_classes = [0, 1, 1, 0] self.assertAllClose(nms_corners_output, exp_nms_corners) self.assertAllClose(nms_scores_output, exp_nms_scores) self.assertAllClose(nms_classes_output, exp_nms_classes) # Two cases will be tested here: using / not 
using static shapes. # Named the two test cases for easier control during testing, with a flag of # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1' # or # '--test_filter=ClassAgnosticNonMaxSuppressionTest.test_batch_classagnostic_nms_with_batch_size_1_use_static_shapes'. @parameterized.named_parameters(('', False), ('_use_static_shapes', True)) def test_batch_classagnostic_nms_with_batch_size_1(self, use_static_shapes=False): def graph_fn(): boxes = tf.constant( [[[[0, 0, 1, 1]], [[0, 0.1, 1, 1.1]], [[0, -0.1, 1, 0.9]], [[0, 10, 1, 11]], [[0, 10.1, 1, 11.1]], [[0, 100, 1, 101]], [[0, 1000, 1, 1002]], [[0, 1000, 1, 1002.1]]]], tf.float32) scores = tf.constant([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]]) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 max_classes_per_detection = 1 use_class_agnostic_nms = True (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, use_class_agnostic_nms=use_class_agnostic_nms, use_static_shapes=use_static_shapes, max_classes_per_detection=max_classes_per_detection) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) exp_nms_corners = [[[0, 10, 1, 11], [0, 0, 1, 1], [0, 1000, 1, 1002], [0, 100, 1, 101]]] exp_nms_scores = [[.95, .9, .85, .3]] exp_nms_classes = [[0, 0, 1, 0]] (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute_cpu(graph_fn, []) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertEqual(num_detections, [4]) if __name__ == '__main__': tf.test.main()
6,616
44.634483
120
py
models
models-master/research/object_detection/core/densepose_ops_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.densepose_ops.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import densepose_ops from object_detection.utils import test_case class DensePoseOpsTest(test_case.TestCase): """Tests for common DensePose operations.""" def test_scale(self): def graph_fn(): dp_surface_coords = tf.constant([ [[0.0, 0.0, 0.1, 0.2], [100.0, 200.0, 0.3, 0.4]], [[50.0, 120.0, 0.5, 0.6], [100.0, 140.0, 0.7, 0.8]] ]) y_scale = tf.constant(1.0 / 100) x_scale = tf.constant(1.0 / 200) output = densepose_ops.scale(dp_surface_coords, y_scale, x_scale) return output output = self.execute(graph_fn, []) expected_dp_surface_coords = np.array([ [[0., 0., 0.1, 0.2], [1.0, 1.0, 0.3, 0.4]], [[0.5, 0.6, 0.5, 0.6], [1.0, 0.7, 0.7, 0.8]] ]) self.assertAllClose(output, expected_dp_surface_coords) def test_clip_to_window(self): def graph_fn(): dp_surface_coords = tf.constant([ [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) output = densepose_ops.clip_to_window(dp_surface_coords, window) return output output = self.execute(graph_fn, []) expected_dp_surface_coords = np.array([ [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.5, 0.25, 0.5, 0.6], [0.75, 0.75, 0.7, 0.8]] ]) self.assertAllClose(output, expected_dp_surface_coords) def test_prune_outside_window(self): def graph_fn(): dp_num_points = tf.constant([2, 0, 1]) dp_part_ids = tf.constant([[1, 1], [0, 0], [16, 0]]) dp_surface_coords = tf.constant([ [[0.9, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.8, 0.5, 0.6, 0.6], [0.5, 0.5, 0.7, 0.7]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = ( densepose_ops.prune_outside_window(dp_num_points, dp_part_ids, dp_surface_coords, window)) return new_dp_num_points, new_dp_part_ids, new_dp_surface_coords new_dp_num_points, new_dp_part_ids, new_dp_surface_coords = ( self.execute_cpu(graph_fn, [])) expected_dp_num_points = np.array([1, 0, 0]) expected_dp_part_ids = np.array([[1], [0], [0]]) expected_dp_surface_coords = np.array([ [[0.75, 0.75, 0.3, 0.4]], [[0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0]] ]) self.assertAllEqual(new_dp_num_points, expected_dp_num_points) self.assertAllEqual(new_dp_part_ids, expected_dp_part_ids) self.assertAllClose(new_dp_surface_coords, expected_dp_surface_coords) def test_change_coordinate_frame(self): def graph_fn(): dp_surface_coords = tf.constant([ [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) output = densepose_ops.change_coordinate_frame(dp_surface_coords, window) return output output = self.execute(graph_fn, []) expected_dp_surface_coords = np.array([ [[0, 0.5, 0.1, 
0.2], [1.0, 1.0, 0.3, 0.4]], [[0.5, -0.5, 0.5, 0.6], [1.5, 1.5, 0.7, 0.8]] ]) self.assertAllClose(output, expected_dp_surface_coords) def test_to_normalized_coordinates(self): def graph_fn(): dp_surface_coords = tf.constant([ [[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]], [[20., 0., 0.5, 0.6], [40., 60., 0.7, 0.8]] ]) output = densepose_ops.to_normalized_coordinates( dp_surface_coords, 40, 60) return output output = self.execute(graph_fn, []) expected_dp_surface_coords = np.array([ [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] ]) self.assertAllClose(output, expected_dp_surface_coords) def test_to_absolute_coordinates(self): def graph_fn(): dp_surface_coords = tf.constant([ [[0.25, 0.5, 0.1, 0.2], [0.75, 0.75, 0.3, 0.4]], [[0.5, 0.0, 0.5, 0.6], [1.0, 1.0, 0.7, 0.8]] ]) output = densepose_ops.to_absolute_coordinates( dp_surface_coords, 40, 60) return output output = self.execute(graph_fn, []) expected_dp_surface_coords = np.array([ [[10., 30., 0.1, 0.2], [30., 45., 0.3, 0.4]], [[20., 0., 0.5, 0.6], [40., 60., 0.7, 0.8]] ]) self.assertAllClose(output, expected_dp_surface_coords) def test_horizontal_flip(self): part_ids_np = np.array([[1, 4], [0, 8]], dtype=np.int32) surf_coords_np = np.array([ [[0.1, 0.7, 0.2, 0.4], [0.3, 0.8, 0.2, 0.4]], [[0.0, 0.5, 0.8, 0.7], [0.6, 1.0, 0.7, 0.9]], ], dtype=np.float32) def graph_fn(): part_ids = tf.constant(part_ids_np, dtype=tf.int32) surf_coords = tf.constant(surf_coords_np, dtype=tf.float32) flipped_part_ids, flipped_surf_coords = densepose_ops.flip_horizontal( part_ids, surf_coords) flipped_twice_part_ids, flipped_twice_surf_coords = ( densepose_ops.flip_horizontal(flipped_part_ids, flipped_surf_coords)) return (flipped_part_ids, flipped_surf_coords, flipped_twice_part_ids, flipped_twice_surf_coords) (flipped_part_ids, flipped_surf_coords, flipped_twice_part_ids, flipped_twice_surf_coords) = self.execute(graph_fn, []) expected_flipped_part_ids = [[1, 5], # 1->1, 4->5 [0, 9]] # 0->0, 8->9 expected_flipped_surf_coords_yx = np.array([ [[0.1, 1.0-0.7], [0.3, 1.0-0.8]], [[0.0, 1.0-0.5], [0.6, 1.0-1.0]], ], dtype=np.float32) self.assertAllEqual(expected_flipped_part_ids, flipped_part_ids) self.assertAllClose(expected_flipped_surf_coords_yx, flipped_surf_coords[:, :, 0:2]) self.assertAllEqual(part_ids_np, flipped_twice_part_ids) self.assertAllClose(surf_coords_np, flipped_twice_surf_coords, rtol=1e-2, atol=1e-2) if __name__ == '__main__': tf.test.main()
6,942
37.787709
80
py
models
models-master/research/object_detection/core/prefetcher_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.prefetcher.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import prefetcher from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class PrefetcherTest(tf.test.TestCase): """Test class for prefetcher.""" def test_prefetch_tensors_with_fully_defined_shapes(self): with self.test_session() as sess: batch_size = 10 image_size = 32 num_batches = 5 examples = tf.Variable(tf.constant(0, dtype=tf.int64)) counter = examples.count_up_to(num_batches) image = tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, name='images') label = tf.random_uniform([batch_size, 1], 0, 10, dtype=tf.int32, name='labels') prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, 'image': image, 'label': label}, capacity=100) tensor_dict = prefetch_queue.dequeue() self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), [batch_size, image_size, image_size, 3]) self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), [batch_size, 1]) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): for _ in range(num_batches): results = sess.run(tensor_dict) self.assertEquals(results['image'].shape, (batch_size, image_size, image_size, 3)) self.assertEquals(results['label'].shape, (batch_size, 1)) with self.assertRaises(tf.errors.OutOfRangeError): sess.run(tensor_dict) def test_prefetch_tensors_with_partially_defined_shapes(self): with self.test_session() as sess: batch_size = 10 image_size = 32 num_batches = 5 examples = tf.Variable(tf.constant(0, dtype=tf.int64)) counter = examples.count_up_to(num_batches) image = tf.random_normal([batch_size, tf.Variable(image_size), tf.Variable(image_size), 3], dtype=tf.float32, name='image') image.set_shape([batch_size, None, None, 3]) label = tf.random_uniform([batch_size, tf.Variable(1)], 0, 10, dtype=tf.int32, name='label') label.set_shape([batch_size, None]) prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter, 'image': image, 'label': label}, capacity=100) tensor_dict = prefetch_queue.dequeue() self.assertAllEqual(tensor_dict['image'].get_shape().as_list(), [batch_size, None, None, 3]) self.assertAllEqual(tensor_dict['label'].get_shape().as_list(), [batch_size, None]) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): for _ in range(num_batches): results = sess.run(tensor_dict) self.assertEquals(results['image'].shape, (batch_size, image_size, image_size, 3)) self.assertEquals(results['label'].shape, (batch_size, 1)) with self.assertRaises(tf.errors.OutOfRangeError): sess.run(tensor_dict) if __name__ == '__main__': tf.test.main()
4,622
41.027273
80
py
models
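The test file above exercises `prefetcher.prefetch` through `tf.test.TestCase`; the same pattern can be used in a plain TF1 session. Below is a minimal sketch, assuming a TF1-style graph and session with `tf_slim` and the `object_detection` package importable; the tensor shapes, names, and the capacity value are illustrative, not part of the library's API.

# Minimal usage sketch (assumptions: TF1 graph mode via disable_v2_behavior,
# tf_slim installed, object_detection on PYTHONPATH; shapes/names illustrative).
import tensorflow.compat.v1 as tf
import tf_slim as slim

from object_detection.core import prefetcher

tf.disable_v2_behavior()

image = tf.random_normal([8, 32, 32, 3], dtype=tf.float32, name='images')
label = tf.random_uniform([8, 1], 0, 10, dtype=tf.int32, name='labels')

# prefetch() returns a queue whose dequeue() yields a dict with the same keys
# and static shapes as the input tensor_dict (as the test above checks).
queue = prefetcher.prefetch(tensor_dict={'image': image, 'label': label},
                            capacity=100)
batch = queue.dequeue()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # QueueRunners starts the background enqueue threads created by prefetch().
  with slim.queues.QueueRunners(sess):
    images_np, labels_np = sess.run([batch['image'], batch['label']])
    print(images_np.shape, labels_np.shape)  # (8, 32, 32, 3) (8, 1)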
models-master/research/object_detection/core/box_predictor.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Box predictor for object detectors.

Box predictors are classes that take a high level
image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding classes for each box.

These components are passed directly to loss functions
in our detection models.

These modules are separated from the main model since the same
few box predictor architectures are shared across many models.
"""
from abc import abstractmethod
import tensorflow.compat.v1 as tf

BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
MASK_PREDICTIONS = 'mask_predictions'


class BoxPredictor(object):
  """BoxPredictor."""

  def __init__(self, is_training, num_classes):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: number of classes.  Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0,... K}).
    """
    self._is_training = is_training
    self._num_classes = num_classes

  @property
  def is_keras_model(self):
    return False

  @property
  def num_classes(self):
    return self._num_classes

  def predict(self, image_features, num_predictions_per_location,
              scope=None, **params):
    """Computes encoded object locations and corresponding confidences.

    Takes a list of high level image feature maps as input and produces a list
    of box encodings and a list of class scores where each element in the
    output lists correspond to the feature maps in the input list.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      num_predictions_per_location: A list of integers representing the number
        of box predictions to be made per spatial location for each feature
        map.
      scope: Variable and Op scope name.
      **params: Additional keyword arguments for specific implementations of
        BoxPredictor.

    Returns:
      A dictionary containing at least the following tensors.
        box_encodings: A list of float tensors. Each entry in the list
          corresponds to a feature map in the input `image_features` list. All
          tensors in the list have one of the two following shapes:
          a. [batch_size, num_anchors_i, q, code_size] representing the
            location of the objects, where q is 1 or the number of classes.
          b. [batch_size, num_anchors_i, code_size].
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.

    Raises:
      ValueError: If length of `image_features` is not equal to length of
        `num_predictions_per_location`.
    """
    if len(image_features) != len(num_predictions_per_location):
      raise ValueError('image_feature and num_predictions_per_location must '
                       'be of same length, found: {} vs {}'.
                       format(len(image_features),
                              len(num_predictions_per_location)))
    if scope is not None:
      with tf.variable_scope(scope):
        return self._predict(image_features, num_predictions_per_location,
                             **params)
    return self._predict(image_features, num_predictions_per_location,
                         **params)

  # TODO(rathodv): num_predictions_per_location could be moved to constructor.
  # This is currently only used by ConvolutionalBoxPredictor.
  @abstractmethod
  def _predict(self, image_features, num_predictions_per_location, **params):
    """Implementations must override this method.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      num_predictions_per_location: A list of integers representing the number
        of box predictions to be made per spatial location for each feature
        map.
      **params: Additional keyword arguments for specific implementations of
        BoxPredictor.

    Returns:
      A dictionary containing at least the following tensors.
        box_encodings: A list of float tensors. Each entry in the list
          corresponds to a feature map in the input `image_features` list. All
          tensors in the list have one of the two following shapes:
          a. [batch_size, num_anchors_i, q, code_size] representing the
            location of the objects, where q is 1 or the number of classes.
          b. [batch_size, num_anchors_i, code_size].
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.
    """
    pass


class KerasBoxPredictor(tf.keras.layers.Layer):
  """Keras-based BoxPredictor."""

  def __init__(self, is_training, num_classes, freeze_batchnorm,
               inplace_batchnorm_update, name=None):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: number of classes.  Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0,... K}).
      freeze_batchnorm: Whether to freeze batch norm parameters during
        training or not. When training with a small batch size (e.g. 1), it is
        desirable to freeze batch norm update and use pretrained batch norm
        params.
      inplace_batchnorm_update: Whether to update batch norm moving average
        values inplace. When this is false train op must add a control
        dependency on tf.graphkeys.UPDATE_OPS collection in order to update
        batch norm statistics.
      name: A string name scope to assign to the model. If `None`, Keras
        will auto-generate one from the class name.
    """
    super(KerasBoxPredictor, self).__init__(name=name)

    self._is_training = is_training
    self._num_classes = num_classes
    self._freeze_batchnorm = freeze_batchnorm
    self._inplace_batchnorm_update = inplace_batchnorm_update

  @property
  def is_keras_model(self):
    return True

  @property
  def num_classes(self):
    return self._num_classes

  def call(self, image_features, **kwargs):
    """Computes encoded object locations and corresponding confidences.

    Takes a list of high level image feature maps as input and produces a list
    of box encodings and a list of class scores where each element in the
    output lists correspond to the feature maps in the input list.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      **kwargs: Additional keyword arguments for specific implementations of
        BoxPredictor.

    Returns:
      A dictionary containing at least the following tensors.
        box_encodings: A list of float tensors. Each entry in the list
          corresponds to a feature map in the input `image_features` list. All
          tensors in the list have one of the two following shapes:
          a. [batch_size, num_anchors_i, q, code_size] representing the
            location of the objects, where q is 1 or the number of classes.
          b. [batch_size, num_anchors_i, code_size].
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.
    """
    return self._predict(image_features, **kwargs)

  @abstractmethod
  def _predict(self, image_features, **kwargs):
    """Implementations must override this method.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      **kwargs: Additional keyword arguments for specific implementations of
        BoxPredictor.

    Returns:
      A dictionary containing at least the following tensors.
        box_encodings: A list of float tensors. Each entry in the list
          corresponds to a feature map in the input `image_features` list. All
          tensors in the list have one of the two following shapes:
          a. [batch_size, num_anchors_i, q, code_size] representing the
            location of the objects, where q is 1 or the number of classes.
          b. [batch_size, num_anchors_i, code_size].
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.
    """
    raise NotImplementedError
10,193
43.710526
80
py
models
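The `predict`/`_predict` contract documented above can be made concrete with a toy subclass. The sketch below is illustrative only: the class name `ToyBoxPredictor`, the single 1x1 convolution per head, and the fixed `code_size` of 4 are assumptions introduced for the example; only the dictionary keys and output shapes follow the interface described in the docstrings.

# Toy sketch of a BoxPredictor subclass (assumptions: TF1 graph mode, layer
# choices and code_size=4 are illustrative, not part of the library).
import tensorflow.compat.v1 as tf

from object_detection.core import box_predictor

tf.disable_v2_behavior()


class ToyBoxPredictor(box_predictor.BoxPredictor):
  """Predicts box encodings and class scores with one 1x1 conv each."""

  def _predict(self, image_features, num_predictions_per_location, **params):
    box_encodings = []
    class_predictions = []
    for features, num_predictions in zip(image_features,
                                         num_predictions_per_location):
      batch_size = tf.shape(features)[0]
      # [batch, H, W, num_predictions * 4] -> [batch, num_anchors_i, 1, 4]
      boxes = tf.layers.conv2d(features, num_predictions * 4, kernel_size=1)
      box_encodings.append(tf.reshape(boxes, [batch_size, -1, 1, 4]))
      # [batch, H, W, num_predictions * (num_classes + 1)]
      #   -> [batch, num_anchors_i, num_classes + 1]
      scores = tf.layers.conv2d(
          features, num_predictions * (self.num_classes + 1), kernel_size=1)
      class_predictions.append(
          tf.reshape(scores, [batch_size, -1, self.num_classes + 1]))
    return {
        box_predictor.BOX_ENCODINGS: box_encodings,
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions,
    }


# Usage: predict() validates list lengths and wraps _predict in the scope.
predictor = ToyBoxPredictor(is_training=False, num_classes=3)
outputs = predictor.predict(
    image_features=[tf.random_normal([2, 8, 8, 16])],
    num_predictions_per_location=[1],
    scope='ToyBoxPredictor')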
models-master/research/object_detection/core/preprocessor_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.preprocessor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from absl.testing import parameterized import numpy as np import six from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf from object_detection.core import preprocessor from object_detection.core import preprocessor_cache from object_detection.core import standard_fields as fields from object_detection.utils import test_case from object_detection.utils import tf_version if six.PY2: import mock # pylint: disable=g-import-not-at-top else: mock = unittest.mock # pylint: disable=g-import-not-at-top class PreprocessorTest(test_case.TestCase, parameterized.TestCase): def createColorfulTestImage(self): ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8)) ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8)) ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8)) imr = tf.concat([ch255, ch0, ch0], 3) img = tf.concat([ch255, ch255, ch0], 3) imb = tf.concat([ch255, ch0, ch255], 3) imw = tf.concat([ch128, ch128, ch128], 3) imu = tf.concat([imr, img], 2) imd = tf.concat([imb, imw], 2) im = tf.concat([imu, imd], 1) return im def createTestImages(self): images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [192, 192, 128, 128]]], dtype=tf.uint8) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], [0, 128, 192, 192], [192, 192, 128, 192]]], dtype=tf.uint8) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192, 192, 192, 128]]], dtype=tf.uint8) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def createEmptyTestBoxes(self): boxes = tf.constant([[]], dtype=tf.float32) return boxes def createTestBoxes(self): boxes = tf.constant( [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) return boxes def createRandomTextBoxes(self): random_boxes = tf.concat([tf.random.uniform([100, 2], 0.0, 0.5, seed=1), tf.random.uniform([100, 2], 0.5, 1.0, seed=2)], axis=1) fixed_boxes = tf.constant( [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0], [0.0, 0.0, 1.0, 1.0], [0.1, 0.2, 0.3, 0.4]], dtype=tf.float32) zero_boxes = tf.zeros((50, 4)) return tf.concat([random_boxes, fixed_boxes, zero_boxes], axis=0) def createTestGroundtruthWeights(self): return tf.constant([1.0, 0.5], dtype=tf.float32) def createZeroBoxes(self): return tf.zeros((100, 4)) def createTestMasks(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def createTestKeypoints(self): 
keypoints_np = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], ]) keypoints = tf.constant(keypoints_np, dtype=tf.float32) keypoint_visibilities = tf.constant( [ [True, True, False], [False, True, True] ]) return keypoints, keypoint_visibilities def createTestKeypointDepths(self): keypoint_depths = tf.constant([ [1.0, 0.9, 0.8], [0.7, 0.6, 0.5] ], dtype=tf.float32) keypoint_depth_weights = tf.constant([ [0.5, 0.6, 0.7], [0.8, 0.9, 1.0] ], dtype=tf.float32) return keypoint_depths, keypoint_depth_weights def createTestKeypointsInsideCrop(self): keypoints = np.array([ [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], ]) return tf.constant(keypoints, dtype=tf.float32) def createTestKeypointsOutsideCrop(self): keypoints = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], ]) return tf.constant(keypoints, dtype=tf.float32) def createTestDensePose(self): dp_num_points = tf.constant([1, 3], dtype=tf.int32) dp_part_ids = tf.constant( [[4, 0, 0], [1, 0, 5]], dtype=tf.int32) dp_surface_coords = tf.constant( [ # Instance 0. [[0.1, 0.2, 0.6, 0.7], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], # Instance 1. [[0.8, 0.9, 0.2, 0.4], [0.1, 0.3, 0.2, 0.8], [0.6, 1.0, 0.3, 0.4]], ], dtype=tf.float32) return dp_num_points, dp_part_ids, dp_surface_coords def createKeypointFlipPermutation(self): return [0, 2, 1] def createKeypointRotPermutation(self): return [0, 2, 1] def createTestLabels(self): labels = tf.constant([1, 2], dtype=tf.int32) return labels def createTestLabelsLong(self): labels = tf.constant([1, 2, 4], dtype=tf.int32) return labels def createTestBoxesOutOfImage(self): boxes = tf.constant( [[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32) return boxes def createTestMultiClassScores(self): return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32) def expectedImagesAfterNormalization(self): images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedMaxImageAfterColorScale(self): images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], [-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedMinImageAfterColorScale(self): images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 
0.4], [-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterLeftRightFlip(self): images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1], [0, 0, 0, -1], [0, 0, 0.5, 0.5]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1], [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterUpDownFlip(self): images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterRot90(self): images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedBoxesAfterLeftRightFlip(self): boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], dtype=tf.float32) return boxes def expectedBoxesAfterUpDownFlip(self): boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) return boxes def expectedBoxesAfterRot90(self): boxes = tf.constant( [[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32) return boxes def expectedMasksAfterLeftRightFlip(self): mask = np.array([ [[0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0]], [[0.0, 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedMasksAfterUpDownFlip(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedMasksAfterRot90(self): mask = np.array([ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [255.0, 255.0, 255.0]], [[0.0, 0.0, 0.0], [255.0, 255.0, 255.0], [255.0, 255.0, 255.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedLabelScoresAfterThresholding(self): return tf.constant([1.0], dtype=tf.float32) def expectedBoxesAfterThresholding(self): return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32) def expectedLabelsAfterThresholding(self): return tf.constant([1], dtype=tf.float32) def expectedMultiClassScoresAfterThresholding(self): return tf.constant([[1.0, 0.0]], dtype=tf.float32) def expectedMasksAfterThresholding(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def 
expectedKeypointsAfterThresholding(self): keypoints = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]] ]) return tf.constant(keypoints, dtype=tf.float32) def expectedLabelScoresAfterThresholdingWithMissingScore(self): return tf.constant([np.nan], dtype=tf.float32) def expectedBoxesAfterThresholdingWithMissingScore(self): return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32) def expectedLabelsAfterThresholdingWithMissingScore(self): return tf.constant([2], dtype=tf.float32) def expectedLabelScoresAfterDropping(self): return tf.constant([0.5], dtype=tf.float32) def expectedBoxesAfterDropping(self): return tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) def expectedLabelsAfterDropping(self): return tf.constant([2], dtype=tf.float32) def expectedMultiClassScoresAfterDropping(self): return tf.constant([[0.5, 0.5]], dtype=tf.float32) def expectedMasksAfterDropping(self): masks = np.array([[[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]) return tf.constant(masks, dtype=tf.float32) def expectedKeypointsAfterDropping(self): keypoints = np.array([[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]) return tf.constant(keypoints, dtype=tf.float32) def expectedLabelsAfterRemapping(self): return tf.constant([3, 3, 4], dtype=tf.float32) def testRgbToGrayscale(self): def graph_fn(): images = self.createTestImages() grayscale_images = preprocessor._rgb_to_grayscale(images) expected_images = tf.image.rgb_to_grayscale(images) return grayscale_images, expected_images (grayscale_images, expected_images) = self.execute_cpu(graph_fn, []) self.assertAllEqual(expected_images, grayscale_images) def testNormalizeImage(self): def graph_fn(): preprocess_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 256, 'target_minval': -1, 'target_maxval': 1 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] images_expected = self.expectedImagesAfterNormalization() return images, images_expected images_, images_expected_ = self.execute_cpu(graph_fn, []) images_shape_ = images_.shape images_expected_shape_ = images_expected_.shape expected_shape = [1, 4, 4, 3] self.assertAllEqual(images_expected_shape_, images_shape_) self.assertAllEqual(images_shape_, expected_shape) self.assertAllClose(images_, images_expected_) def testRetainBoxesAboveThreshold(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() (retained_boxes, retained_labels, retained_weights) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, threshold=0.6) return [ retained_boxes, retained_labels, retained_weights, self.expectedBoxesAfterThresholding(), self.expectedLabelsAfterThresholding(), self.expectedLabelScoresAfterThresholding() ] (retained_boxes_, retained_labels_, retained_weights_, expected_retained_boxes_, expected_retained_labels_, expected_retained_weights_) = self.execute_cpu(graph_fn, []) self.assertAllClose( retained_boxes_, expected_retained_boxes_) self.assertAllClose( retained_labels_, expected_retained_labels_) self.assertAllClose( retained_weights_, expected_retained_weights_) def testRetainBoxesAboveThresholdWithMultiClassScores(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() (_, _, _, 
retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, multiclass_scores=multiclass_scores, threshold=0.6) return [ retained_multiclass_scores, self.expectedMultiClassScoresAfterThresholding() ] (retained_multiclass_scores_, expected_retained_multiclass_scores_) = self.execute(graph_fn, []) self.assertAllClose(retained_multiclass_scores_, expected_retained_multiclass_scores_) def testRetainBoxesAboveThresholdWithMasks(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = self.createTestMasks() _, _, _, retained_masks = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, masks, threshold=0.6) return [ retained_masks, self.expectedMasksAfterThresholding()] retained_masks_, expected_retained_masks_ = self.execute_cpu(graph_fn, []) self.assertAllClose( retained_masks_, expected_retained_masks_) def testRetainBoxesAboveThresholdWithKeypoints(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints, _ = self.createTestKeypoints() (_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, keypoints=keypoints, threshold=0.6) return [retained_keypoints, self.expectedKeypointsAfterThresholding()] (retained_keypoints_, expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) def testDropLabelProbabilistically(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() (retained_boxes, retained_labels, retained_weights) = preprocessor.drop_label_probabilistically( boxes, labels, weights, dropped_label=1, drop_probability=1.0) return [ retained_boxes, retained_labels, retained_weights, self.expectedBoxesAfterDropping(), self.expectedLabelsAfterDropping(), self.expectedLabelScoresAfterDropping() ] (retained_boxes_, retained_labels_, retained_weights_, expected_retained_boxes_, expected_retained_labels_, expected_retained_weights_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_boxes_, expected_retained_boxes_) self.assertAllClose(retained_labels_, expected_retained_labels_) self.assertAllClose(retained_weights_, expected_retained_weights_) def testDropLabelProbabilisticallyWithMultiClassScores(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() (_, _, _, retained_multiclass_scores) = preprocessor.drop_label_probabilistically( boxes, labels, weights, multiclass_scores=multiclass_scores, dropped_label=1, drop_probability=1.0) return [retained_multiclass_scores, self.expectedMultiClassScoresAfterDropping()] (retained_multiclass_scores_, expected_retained_multiclass_scores_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_multiclass_scores_, expected_retained_multiclass_scores_) def testDropLabelProbabilisticallyWithMasks(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = self.createTestMasks() (_, _, _, retained_masks) = preprocessor.drop_label_probabilistically( boxes, labels, weights, masks=masks, dropped_label=1, drop_probability=1.0) return [retained_masks, self.expectedMasksAfterDropping()] (retained_masks_, expected_retained_masks_) = 
self.execute_cpu(graph_fn, []) self.assertAllClose(retained_masks_, expected_retained_masks_) def testDropLabelProbabilisticallyWithKeypoints(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints, _ = self.createTestKeypoints() (_, _, _, retained_keypoints) = preprocessor.drop_label_probabilistically( boxes, labels, weights, keypoints=keypoints, dropped_label=1, drop_probability=1.0) return [retained_keypoints, self.expectedKeypointsAfterDropping()] (retained_keypoints_, expected_retained_keypoints_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_keypoints_, expected_retained_keypoints_) def testRemapLabels(self): def graph_fn(): labels = self.createTestLabelsLong() remapped_labels = preprocessor.remap_labels(labels, [1, 2], 3) return [remapped_labels, self.expectedLabelsAfterRemapping()] (remapped_labels_, expected_remapped_labels_) = self.execute_cpu(graph_fn, []) self.assertAllClose(remapped_labels_, expected_remapped_labels_) def testFlipBoxesLeftRight(self): def graph_fn(): boxes = self.createTestBoxes() flipped_boxes = preprocessor._flip_boxes_left_right(boxes) expected_boxes = self.expectedBoxesAfterLeftRightFlip() return flipped_boxes, expected_boxes flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) def testFlipBoxesUpDown(self): def graph_fn(): boxes = self.createTestBoxes() flipped_boxes = preprocessor._flip_boxes_up_down(boxes) expected_boxes = self.expectedBoxesAfterUpDownFlip() return flipped_boxes, expected_boxes flipped_boxes, expected_boxes = self.execute_cpu(graph_fn, []) self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) def testRot90Boxes(self): def graph_fn(): boxes = self.createTestBoxes() rotated_boxes = preprocessor._rot90_boxes(boxes) expected_boxes = self.expectedBoxesAfterRot90() return rotated_boxes, expected_boxes rotated_boxes, expected_boxes = self.execute_cpu(graph_fn, []) self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten()) def testFlipMasksLeftRight(self): def graph_fn(): test_mask = self.createTestMasks() flipped_mask = preprocessor._flip_masks_left_right(test_mask) expected_mask = self.expectedMasksAfterLeftRightFlip() return flipped_mask, expected_mask flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) def testFlipMasksUpDown(self): def graph_fn(): test_mask = self.createTestMasks() flipped_mask = preprocessor._flip_masks_up_down(test_mask) expected_mask = self.expectedMasksAfterUpDownFlip() return flipped_mask, expected_mask flipped_mask, expected_mask = self.execute_cpu(graph_fn, []) self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) def testRot90Masks(self): def graph_fn(): test_mask = self.createTestMasks() rotated_mask = preprocessor._rot90_masks(test_mask) expected_mask = self.expectedMasksAfterRot90() return [rotated_mask, expected_mask] rotated_mask, expected_mask = self.execute(graph_fn, []) self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten()) def _testPreprocessorCache(self, preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False): if self.is_tf2(): return def graph_fn(): cache = preprocessor_cache.PreprocessorCache() images = self.createTestImages() boxes = self.createTestBoxes() weights = self.createTestGroundtruthWeights() classes = self.createTestLabels() masks = self.createTestMasks() 
keypoints, _ = self.createTestKeypoints() preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=test_masks, include_keypoints=test_keypoints) out = [] for _ in range(2): tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_weights: weights } if test_boxes: tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes tensor_dict[fields.InputDataFields.groundtruth_classes] = classes if test_masks: tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks if test_keypoints: tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints out.append( preprocessor.preprocess(tensor_dict, preprocess_options, preprocessor_arg_map, cache)) return out out1, out2 = self.execute_cpu_tf1(graph_fn, []) for (_, v1), (_, v2) in zip(out1.items(), out2.items()): self.assertAllClose(v1, v2) def testRandomHorizontalFlip(self): def graph_fn(): preprocess_options = [(preprocessor.random_horizontal_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterLeftRightFlip() boxes_expected1 = self.expectedBoxesAfterLeftRightFlip() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [images_diff, images_diff_expected, boxes_diff, boxes_diff_expected] (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomHorizontalFlipWithEmptyBoxes(self): def graph_fn(): preprocess_options = [(preprocessor.random_horizontal_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterLeftRightFlip() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [images_diff, images_diff_expected, boxes, boxes_expected] (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomHorizontalFlipWithCache(self): keypoint_flip_permutation = self.createKeypointFlipPermutation() preprocess_options = [ 
(preprocessor.random_horizontal_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomHorizontalFlipWithKeypointDepth(self): def graph_fn(): preprocess_options = [(preprocessor.random_horizontal_flip, {})] image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints, keypoint_visibilities = self.createTestKeypoints() keypoint_depths, keypoint_depth_weights = self.createTestKeypointDepths() keypoint_flip_permutation = self.createKeypointFlipPermutation() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints, fields.InputDataFields.groundtruth_keypoint_visibilities: keypoint_visibilities, fields.InputDataFields.groundtruth_keypoint_depths: keypoint_depths, fields.InputDataFields.groundtruth_keypoint_depth_weights: keypoint_depth_weights, } preprocess_options = [(preprocessor.random_horizontal_flip, { 'keypoint_flip_permutation': keypoint_flip_permutation, 'probability': 1.0 })] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True, include_keypoint_visibilities=True, include_dense_pose=False, include_keypoint_depths=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) keypoint_depths = tensor_dict[ fields.InputDataFields.groundtruth_keypoint_depths] keypoint_depth_weights = tensor_dict[ fields.InputDataFields.groundtruth_keypoint_depth_weights] output_tensors = [keypoint_depths, keypoint_depth_weights] return output_tensors output_tensors = self.execute_cpu(graph_fn, []) expected_keypoint_depths = [[1.0, 0.8, 0.9], [0.7, 0.5, 0.6]] expected_keypoint_depth_weights = [[0.5, 0.7, 0.6], [0.8, 1.0, 0.9]] self.assertAllClose(expected_keypoint_depths, output_tensors[0]) self.assertAllClose(expected_keypoint_depth_weights, output_tensors[1]) def testRandomVerticalFlip(self): def graph_fn(): preprocess_options = [(preprocessor.random_vertical_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes } images_expected1 = self.expectedImagesAfterUpDownFlip() boxes_expected1 = self.expectedBoxesAfterUpDownFlip() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [ images_diff, images_diff_expected, boxes_diff, boxes_diff_expected ] (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, 
images_diff_expected_) def testRandomVerticalFlipWithEmptyBoxes(self): def graph_fn(): preprocess_options = [(preprocessor.random_vertical_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes } images_expected1 = self.expectedImagesAfterUpDownFlip() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [images_diff, images_diff_expected, boxes, boxes_expected] (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomVerticalFlipWithCache(self): keypoint_flip_permutation = self.createKeypointFlipPermutation() preprocess_options = [ (preprocessor.random_vertical_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomVerticalFlipWithMaskAndKeypoints(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints, _ = self.createTestKeypoints() keypoint_flip_permutation = self.createKeypointFlipPermutation() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocess_options = [ (preprocessor.random_vertical_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] self.assertIsNotNone(boxes) self.assertIsNotNone(masks) self.assertIsNotNone(keypoints) def testRandomRotation90(self): def graph_fn(): preprocess_options = [(preprocessor.random_rotation90, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes } images_expected1 = self.expectedImagesAfterRot90() boxes_expected1 = self.expectedBoxesAfterRot90() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) 
images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [ images_diff, images_diff_expected, boxes_diff, boxes_diff_expected ] (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomRotation90WithEmptyBoxes(self): def graph_fn(): preprocess_options = [(preprocessor.random_rotation90, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes } images_expected1 = self.expectedImagesAfterRot90() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) return [images_diff, images_diff_expected, boxes, boxes_expected] (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomRotation90WithCache(self): preprocess_options = [(preprocessor.random_rotation90, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomRotation90WithMaskAndKeypoints(self): image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints, _ = self.createTestKeypoints() keypoint_rot_permutation = self.createKeypointRotPermutation() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocess_options = [(preprocessor.random_rotation90, { 'keypoint_rot_permutation': keypoint_rot_permutation })] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] self.assertIsNotNone(boxes) self.assertIsNotNone(masks) self.assertIsNotNone(keypoints) def testRandomPixelValueScale(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_pixel_value_scale, {})) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0 images_max = 
tf.cast(images, dtype=tf.float32) * 1.1 / 255.0 images = tensor_dict[fields.InputDataFields.image] values_greater = tf.greater_equal(images, images_min) values_less = tf.less_equal(images, images_max) values_true = tf.fill([1, 4, 4, 3], True) return [values_greater, values_less, values_true] (values_greater_, values_less_, values_true_) = self.execute_cpu(graph_fn, []) self.assertAllClose(values_greater_, values_true_) self.assertAllClose(values_less_, values_true_) def testRandomPixelValueScaleWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_pixel_value_scale, {})) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def testRandomImageScale(self): def graph_fn(): preprocess_options = [(preprocessor.random_image_scale, {})] images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images_scaled = tensor_dict[fields.InputDataFields.image] images_original_shape = tf.shape(images_original) images_scaled_shape = tf.shape(images_scaled) return [images_original_shape, images_scaled_shape] (images_original_shape_, images_scaled_shape_) = self.execute_cpu(graph_fn, []) self.assertLessEqual(images_original_shape_[1] * 0.5, images_scaled_shape_[1]) self.assertGreaterEqual(images_original_shape_[1] * 2.0, images_scaled_shape_[1]) self.assertLessEqual(images_original_shape_[2] * 0.5, images_scaled_shape_[2]) self.assertGreaterEqual(images_original_shape_[2] * 2.0, images_scaled_shape_[2]) def testRandomImageScaleWithCache(self): preprocess_options = [(preprocessor.random_image_scale, {})] self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomRGBtoGray(self): def graph_fn(): preprocess_options = [(preprocessor.random_rgb_to_gray, {})] images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images_gray = tensor_dict[fields.InputDataFields.image] images_gray_r, images_gray_g, images_gray_b = tf.split( value=images_gray, num_or_size_splits=3, axis=3) images_r, images_g, images_b = tf.split( value=images_original, num_or_size_splits=3, axis=3) images_r_diff1 = tf.squared_difference( tf.cast(images_r, dtype=tf.float32), tf.cast(images_gray_r, dtype=tf.float32)) images_r_diff2 = tf.squared_difference( tf.cast(images_gray_r, dtype=tf.float32), tf.cast(images_gray_g, dtype=tf.float32)) images_r_diff = tf.multiply(images_r_diff1, images_r_diff2) images_g_diff1 = tf.squared_difference( tf.cast(images_g, dtype=tf.float32), tf.cast(images_gray_g, dtype=tf.float32)) images_g_diff2 = tf.squared_difference( tf.cast(images_gray_g, dtype=tf.float32), tf.cast(images_gray_b, dtype=tf.float32)) images_g_diff = tf.multiply(images_g_diff1, images_g_diff2) images_b_diff1 = tf.squared_difference( tf.cast(images_b, dtype=tf.float32), tf.cast(images_gray_b, dtype=tf.float32)) images_b_diff2 = tf.squared_difference( tf.cast(images_gray_b, dtype=tf.float32), tf.cast(images_gray_r, dtype=tf.float32)) images_b_diff = tf.multiply(images_b_diff1, images_b_diff2) image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1]) return [images_r_diff, images_g_diff, images_b_diff, image_zero1] 
(images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = self.execute_cpu(graph_fn, []) self.assertAllClose(images_r_diff_, image_zero1_) self.assertAllClose(images_g_diff_, image_zero1_) self.assertAllClose(images_b_diff_, image_zero1_) def testRandomRGBtoGrayWithCache(self): preprocess_options = [( preprocessor.random_rgb_to_gray, {'probability': 0.5})] self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomAdjustBrightness(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_adjust_brightness, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_bright = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_bright_shape = tf.shape(images_bright) return [image_original_shape, image_bright_shape] (image_original_shape_, image_bright_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(image_original_shape_, image_bright_shape_) def testRandomAdjustBrightnessWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_adjust_brightness, {})) self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomAdjustContrast(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_adjust_contrast, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_contrast = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_contrast_shape = tf.shape(images_contrast) return [image_original_shape, image_contrast_shape] (image_original_shape_, image_contrast_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(image_original_shape_, image_contrast_shape_) def testRandomAdjustContrastWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_adjust_contrast, {})) self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomAdjustHue(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_adjust_hue, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_hue = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_hue_shape = tf.shape(images_hue) return 
[image_original_shape, image_hue_shape] (image_original_shape_, image_hue_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(image_original_shape_, image_hue_shape_) def testRandomAdjustHueWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_adjust_hue, {})) self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomDistortColor(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_distort_color, {})) images_original = self.createTestImages() images_original_shape = tf.shape(images_original) tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_distorted_color = tensor_dict[fields.InputDataFields.image] images_distorted_color_shape = tf.shape(images_distorted_color) return [images_original_shape, images_distorted_color_shape] (images_original_shape_, images_distorted_color_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_original_shape_, images_distorted_color_shape_) def testRandomDistortColorWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_distort_color, {})) self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomJitterBoxes(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {})) boxes = self.createRandomTextBoxes() boxes_shape = tf.shape(boxes) tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] distorted_boxes_shape = tf.shape(distorted_boxes) return [boxes_shape, distorted_boxes_shape] (boxes_shape_, distorted_boxes_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) @parameterized.parameters( ['expand', 'shrink', 'expand_symmetric', 'shrink_symmetric', 'expand_symmetric_xy', 'shrink_symmetric_xy'] ) def testRandomJitterBoxesZeroRatio(self, jitter_mode): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'ratio': .0, 'jitter_mode': jitter_mode})) boxes = self.createRandomTextBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] (boxes, distorted_boxes) = self.execute_cpu(graph_fn, []) self.assertAllClose(boxes, distorted_boxes) def testRandomJitterBoxesExpand(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'expand'})) boxes = self.createRandomTextBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) 
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin <= ymin)) self.assertTrue(np.all(distorted_xmin <= xmin)) self.assertTrue(np.all(distorted_ymax >= ymax)) self.assertTrue(np.all(distorted_xmax >= xmax)) def testRandomJitterBoxesExpandSymmetric(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'expand_symmetric'})) boxes = self.createRandomTextBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin <= ymin)) self.assertTrue(np.all(distorted_xmin <= xmin)) self.assertTrue(np.all(distorted_ymax >= ymax)) self.assertTrue(np.all(distorted_xmax >= xmax)) self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5) self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5) def testRandomJitterBoxesExpandSymmetricXY(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'expand_symmetric_xy'})) boxes = self.createRandomTextBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin <= ymin)) self.assertTrue(np.all(distorted_xmin <= xmin)) self.assertTrue(np.all(distorted_ymax >= ymax)) self.assertTrue(np.all(distorted_xmax >= xmax)) self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5) self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5) height, width = tf.maximum(1e-6, ymax - ymin), tf.maximum(1e-6, xmax - xmin) self.assertAllClose((distorted_ymax - ymax) / height, (distorted_xmax - xmax) / width, rtol=1e-5) self.assertAllLessEqual((distorted_ymax - ymax) / height, 0.05) self.assertAllGreaterEqual((distorted_ymax - ymax) / width, 0.00) def testRandomJitterBoxesShrink(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'shrink'})) boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, 
distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin >= ymin)) self.assertTrue(np.all(distorted_xmin >= xmin)) self.assertTrue(np.all(distorted_ymax <= ymax)) self.assertTrue(np.all(distorted_xmax <= xmax)) def testRandomJitterBoxesShrinkSymmetric(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'shrink_symmetric'})) boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin >= ymin)) self.assertTrue(np.all(distorted_xmin >= xmin)) self.assertTrue(np.all(distorted_ymax <= ymax)) self.assertTrue(np.all(distorted_xmax <= xmax)) self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5) self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5) def testRandomJitterBoxesShrinkSymmetricXY(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {'jitter_mode': 'shrink_symmetric_xy'})) boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] return [boxes, distorted_boxes] boxes, distorted_boxes = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] distorted_ymin, distorted_xmin, distorted_ymax, distorted_xmax = ( distorted_boxes[:, 0], distorted_boxes[:, 1], distorted_boxes[:, 2], distorted_boxes[:, 3]) self.assertTrue(np.all(distorted_ymin >= ymin)) self.assertTrue(np.all(distorted_xmin >= xmin)) self.assertTrue(np.all(distorted_ymax <= ymax)) self.assertTrue(np.all(distorted_xmax <= xmax)) self.assertAllClose(ymin - distorted_ymin, distorted_ymax - ymax, rtol=1e-5) self.assertAllClose(xmin - distorted_xmin, distorted_xmax - xmax, rtol=1e-5) height, width = tf.maximum(1e-6, ymax - ymin), tf.maximum(1e-6, xmax - xmin) self.assertAllClose((ymax - distorted_ymax) / height, (xmax - distorted_xmax) / width, rtol=1e-5) self.assertAllLessEqual((ymax - distorted_ymax) / height, 0.05) self.assertAllGreaterEqual((ymax - distorted_ymax)/ width, 0.00) def testRandomCropImage(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_crop_image, {})) images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, 
fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) return [ boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank ] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testRandomCropImageWithCache(self): preprocess_options = [(preprocessor.random_rgb_to_gray, {'probability': 0.5}), (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1, }), (preprocessor.random_crop_image, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def testRandomCropImageGrayscale(self): def graph_fn(): preprocessing_options = [(preprocessor.rgb_to_gray, {}), (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1, }), (preprocessor.random_crop_image, {})] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) return [ boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank ] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testRandomCropImageWithBoxOutOfImage(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_crop_image, {})) images = self.createTestImages() boxes = self.createTestBoxesOutOfImage() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) images_rank = tf.rank(images) distorted_images_rank = 
tf.rank(distorted_images) return [ boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank ] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testRandomCropImageWithRandomCoefOne(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_image, { 'random_coef': 1.0 })] distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_weights = distorted_tensor_dict[ fields.InputDataFields.groundtruth_weights] boxes_shape = tf.shape(boxes) distorted_boxes_shape = tf.shape(distorted_boxes) images_shape = tf.shape(images) distorted_images_shape = tf.shape(distorted_images) return [ boxes_shape, distorted_boxes_shape, images_shape, distorted_images_shape, images, distorted_images, boxes, distorted_boxes, labels, distorted_labels, weights, distorted_weights ] (boxes_shape_, distorted_boxes_shape_, images_shape_, distorted_images_shape_, images_, distorted_images_, boxes_, distorted_boxes_, labels_, distorted_labels_, weights_, distorted_weights_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, distorted_boxes_shape_) self.assertAllEqual(images_shape_, distorted_images_shape_) self.assertAllClose(images_, distorted_images_) self.assertAllClose(boxes_, distorted_boxes_) self.assertAllEqual(labels_, distorted_labels_) self.assertAllEqual(weights_, distorted_weights_) def testRandomCropWithMockSampleDistortedBoundingBox(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createColorfulTestImage() boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32) labels = tf.constant([1, 7, 11], dtype=tf.int32) weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_image, {})] with mock.patch.object(tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = (tf.constant( [6, 143, 0], dtype=tf.int32), tf.constant( [190, 237, -1], dtype=tf.int32), tf.constant( [[[0.03, 0.3575, 0.98, 
0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_weights = distorted_tensor_dict[ fields.InputDataFields.groundtruth_weights] expected_boxes = tf.constant( [[0.178947, 0.07173, 0.75789469, 0.66244733], [0.28421, 0.0, 0.38947365, 0.57805908]], dtype=tf.float32) expected_labels = tf.constant([7, 11], dtype=tf.int32) expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) return [ distorted_boxes, distorted_labels, distorted_weights, expected_boxes, expected_labels, expected_weights ] (distorted_boxes_, distorted_labels_, distorted_weights_, expected_boxes_, expected_labels_, expected_weights_) = self.execute_cpu(graph_fn, []) self.assertAllClose(distorted_boxes_, expected_boxes_) self.assertAllEqual(distorted_labels_, expected_labels_) self.assertAllEqual(distorted_weights_, expected_weights_) def testRandomCropWithoutClipBoxes(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createColorfulTestImage() boxes = tf.constant([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32) keypoints = tf.constant([ [[0.1, 0.1], [0.8, 0.3]], [[0.2, 0.4], [0.75, 0.75]], [[0.3, 0.1], [0.4, 0.7]], ], dtype=tf.float32) labels = tf.constant([1, 7, 11], dtype=tf.int32) weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_keypoints: keypoints, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) preprocessing_options = [(preprocessor.random_crop_image, { 'clip_boxes': False, })] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) with mock.patch.object(tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = (tf.constant( [6, 143, 0], dtype=tf.int32), tf.constant( [190, 237, -1], dtype=tf.int32), tf.constant( [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_keypoints = distorted_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_weights = distorted_tensor_dict[ fields.InputDataFields.groundtruth_weights] expected_boxes = tf.constant( [[0.178947, 0.07173, 0.75789469, 0.66244733], [0.28421, -0.434599, 0.38947365, 0.57805908]], dtype=tf.float32) expected_keypoints = tf.constant( [[[0.178947, 0.07173], [0.75789469, 0.66244733]], [[0.28421, -0.434599], [0.38947365, 0.57805908]]], dtype=tf.float32) expected_labels = tf.constant([7, 11], dtype=tf.int32) expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32) return [distorted_boxes, distorted_keypoints, distorted_labels, distorted_weights, expected_boxes, expected_keypoints, expected_labels, expected_weights] (distorted_boxes_, distorted_keypoints_, distorted_labels_, 
distorted_weights_, expected_boxes_, expected_keypoints_, expected_labels_, expected_weights_) = self.execute_cpu(graph_fn, []) self.assertAllClose(distorted_boxes_, expected_boxes_) self.assertAllClose(distorted_keypoints_, expected_keypoints_) self.assertAllEqual(distorted_labels_, expected_labels_) self.assertAllEqual(distorted_weights_, expected_weights_) def testRandomCropImageWithMultiClassScores(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_crop_image, {})) images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.multiclass_scores: multiclass_scores } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_multiclass_scores = distorted_tensor_dict[ fields.InputDataFields.multiclass_scores] boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) multiclass_scores_rank = tf.rank(multiclass_scores) distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) return [ boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, distorted_images_rank, multiclass_scores_rank, distorted_multiclass_scores_rank, distorted_multiclass_scores ] (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, distorted_images_rank_, multiclass_scores_rank_, distorted_multiclass_scores_rank_, distorted_multiclass_scores_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) self.assertAllEqual(multiclass_scores_rank_, distorted_multiclass_scores_rank_) self.assertAllEqual(distorted_boxes_.shape[0], distorted_multiclass_scores_.shape[0]) def testStrictRandomCropImageWithGroundtruthWeights(self): def graph_fn(): image = self.createColorfulTestImage()[0] boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) new_image, new_boxes, new_labels, new_groundtruth_weights = ( preprocessor._strict_random_crop_image( image, boxes, labels, weights)) return [new_image, new_boxes, new_labels, new_groundtruth_weights] (new_image, new_boxes, _, new_groundtruth_weights) = self.execute_cpu(graph_fn, []) expected_boxes = np.array( [[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) self.assertAllEqual(new_image.shape, [190, 237, 3]) self.assertAllEqual(new_groundtruth_weights, [1.0, 0.5]) self.assertAllClose( new_boxes.flatten(), expected_boxes.flatten()) def 
testStrictRandomCropImageWithMasks(self): def graph_fn(): image = self.createColorfulTestImage()[0] boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) new_image, new_boxes, new_labels, new_weights, new_masks = ( preprocessor._strict_random_crop_image( image, boxes, labels, weights, masks=masks)) return [new_image, new_boxes, new_labels, new_weights, new_masks] (new_image, new_boxes, _, _, new_masks) = self.execute_cpu(graph_fn, []) expected_boxes = np.array( [[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) self.assertAllEqual(new_image.shape, [190, 237, 3]) self.assertAllEqual(new_masks.shape, [2, 190, 237]) self.assertAllClose( new_boxes.flatten(), expected_boxes.flatten()) def testStrictRandomCropImageWithMaskWeights(self): def graph_fn(): image = self.createColorfulTestImage()[0] boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) mask_weights = tf.constant([1.0, 0.0], dtype=tf.float32) with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) results = preprocessor._strict_random_crop_image( image, boxes, labels, weights, masks=masks, mask_weights=mask_weights) return results (new_image, new_boxes, _, _, new_masks, new_mask_weights) = self.execute_cpu(graph_fn, []) expected_boxes = np.array( [[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32) self.assertAllEqual(new_image.shape, [190, 237, 3]) self.assertAllEqual(new_masks.shape, [2, 190, 237]) self.assertAllClose(new_mask_weights, [1.0, 0.0]) self.assertAllClose( new_boxes.flatten(), expected_boxes.flatten()) def testStrictRandomCropImageWithKeypoints(self): def graph_fn(): image = self.createColorfulTestImage()[0] boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints, keypoint_visibilities = self.createTestKeypoints() with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) (new_image, new_boxes, new_labels, new_weights, new_keypoints, new_keypoint_visibilities) = preprocessor._strict_random_crop_image( image, boxes, labels, weights, keypoints=keypoints, keypoint_visibilities=keypoint_visibilities) return [new_image, new_boxes, new_labels, new_weights, new_keypoints, new_keypoint_visibilities] (new_image, new_boxes, _, _, new_keypoints, new_keypoint_visibilities) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([ [0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32) expected_keypoints = np.array([ [[np.nan, 
np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277]] ], dtype=np.float32) expected_keypoint_visibilities = [ [False, False, False], [False, True, True] ] self.assertAllEqual(new_image.shape, [190, 237, 3]) self.assertAllClose( new_boxes, expected_boxes) self.assertAllClose( new_keypoints, expected_keypoints) self.assertAllEqual( new_keypoint_visibilities, expected_keypoint_visibilities) def testRunRandomCropImageWithMasks(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) mask_weights = tf.constant([1.0, 0.0], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_instance_mask_weights: mask_weights } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_instance_mask_weights=True) preprocessing_options = [(preprocessor.random_crop_image, {})] with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_masks = distorted_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] distorted_mask_weights = distorted_tensor_dict[ fields.InputDataFields.groundtruth_instance_mask_weights] return [distorted_image, distorted_boxes, distorted_labels, distorted_masks, distorted_mask_weights] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_masks_, distorted_mask_weights_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([ [0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0], ], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) self.assertAllEqual(distorted_masks_.shape, [2, 190, 237]) self.assertAllClose(distorted_mask_weights_, [1.0, 0.0]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose( distorted_boxes_.flatten(), expected_boxes.flatten()) def testRunRandomCropImageWithKeypointsInsideCrop(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints = self.createTestKeypointsInsideCrop() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_keypoints: keypoints, fields.InputDataFields.groundtruth_weights: weights } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) preprocessing_options = [(preprocessor.random_crop_image, {})] with mock.patch.object( tf.image, 
'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_keypoints = distorted_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return [distorted_image, distorted_boxes, distorted_labels, distorted_keypoints] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_keypoints_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([ [0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0], ], dtype=np.float32) expected_keypoints = np.array([ [[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277]], [[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277]] ]) self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose( distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllClose( distorted_keypoints_.flatten(), expected_keypoints.flatten()) def testRunRandomCropImageWithKeypointsOutsideCrop(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints = self.createTestKeypointsOutsideCrop() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) preprocessing_options = [(preprocessor.random_crop_image, {})] with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 143, 0], dtype=tf.int32), tf.constant([190, 237, -1], dtype=tf.int32), tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_keypoints = distorted_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return [distorted_image, distorted_boxes, distorted_labels, distorted_keypoints] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_keypoints_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([ [0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0], ], dtype=np.float32) expected_keypoints = np.array([ [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], ]) self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose( 
distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllClose( distorted_keypoints_.flatten(), expected_keypoints.flatten()) def testRunRandomCropImageWithDensePose(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() dp_num_points, dp_part_ids, dp_surface_coords = self.createTestDensePose() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_dp_num_points: dp_num_points, fields.InputDataFields.groundtruth_dp_part_ids: dp_part_ids, fields.InputDataFields.groundtruth_dp_surface_coords: dp_surface_coords } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_dense_pose=True) preprocessing_options = [(preprocessor.random_crop_image, {})] with mock.patch.object( tf.image, 'sample_distorted_bounding_box' ) as mock_sample_distorted_bounding_box: mock_sample_distorted_bounding_box.return_value = ( tf.constant([6, 40, 0], dtype=tf.int32), tf.constant([134, 340, -1], dtype=tf.int32), tf.constant([[[0.03, 0.1, 0.7, 0.95]]], dtype=tf.float32)) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_dp_num_points = distorted_tensor_dict[ fields.InputDataFields.groundtruth_dp_num_points] distorted_dp_part_ids = distorted_tensor_dict[ fields.InputDataFields.groundtruth_dp_part_ids] distorted_dp_surface_coords = distorted_tensor_dict[ fields.InputDataFields.groundtruth_dp_surface_coords] return [distorted_image, distorted_dp_num_points, distorted_dp_part_ids, distorted_dp_surface_coords] (distorted_image_, distorted_dp_num_points_, distorted_dp_part_ids_, distorted_dp_surface_coords_) = self.execute_cpu(graph_fn, []) expected_dp_num_points = np.array([1, 1]) expected_dp_part_ids = np.array([[4], [0]]) expected_dp_surface_coords = np.array([ [[0.10447761, 0.1176470, 0.6, 0.7]], [[0.10447761, 0.2352941, 0.2, 0.8]], ]) self.assertAllEqual(distorted_image_.shape, [1, 134, 340, 3]) self.assertAllEqual(distorted_dp_num_points_, expected_dp_num_points) self.assertAllEqual(distorted_dp_part_ids_, expected_dp_part_ids) self.assertAllClose(distorted_dp_surface_coords_, expected_dp_surface_coords) def testRunRetainBoxesAboveThreshold(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } preprocessing_options = [ (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) ] preprocessor_arg_map = preprocessor.get_default_func_arg_map() retained_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) retained_boxes = retained_tensor_dict[ fields.InputDataFields.groundtruth_boxes] retained_labels = retained_tensor_dict[ fields.InputDataFields.groundtruth_classes] retained_weights = retained_tensor_dict[ fields.InputDataFields.groundtruth_weights] return [retained_boxes, retained_labels, retained_weights, self.expectedBoxesAfterThresholding(), self.expectedLabelsAfterThresholding(), self.expectedLabelScoresAfterThresholding()] 
(retained_boxes_, retained_labels_, retained_weights_, expected_retained_boxes_, expected_retained_labels_, expected_retained_weights_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_boxes_, expected_retained_boxes_) self.assertAllClose(retained_labels_, expected_retained_labels_) self.assertAllClose( retained_weights_, expected_retained_weights_) def testRunRetainBoxesAboveThresholdWithMasks(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = self.createTestMasks() tensor_dict = { fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_instance_masks: masks } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_label_weights=True, include_instance_masks=True) preprocessing_options = [ (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) ] retained_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) retained_masks = retained_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] return [retained_masks, self.expectedMasksAfterThresholding()] (retained_masks_, expected_masks_) = self.execute(graph_fn, []) self.assertAllClose(retained_masks_, expected_masks_) def testRunRetainBoxesAboveThresholdWithKeypoints(self): def graph_fn(): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints, _ = self.createTestKeypoints() tensor_dict = { fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) preprocessing_options = [ (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6}) ] retained_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) retained_keypoints = retained_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return [retained_keypoints, self.expectedKeypointsAfterThresholding()] (retained_keypoints_, expected_keypoints_) = self.execute_cpu(graph_fn, []) self.assertAllClose(retained_keypoints_, expected_keypoints_) def testRandomCropToAspectRatioWithCache(self): preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def testRunRandomCropToAspectRatioWithMasks(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_instance_masks: masks } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True) preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] with mock.patch.object(preprocessor, '_random_integer') as mock_random_integer: mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) distorted_tensor_dict = 
preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_masks = distorted_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] return [ distorted_image, distorted_boxes, distorted_labels, distorted_masks ] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_masks_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) self.assertAllEqual(distorted_labels_, [1]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllEqual(distorted_masks_.shape, [1, 200, 200]) def testRunRandomCropToAspectRatioCenterCrop(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_instance_masks: masks } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True) preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, { 'center_crop': True })] with mock.patch.object(preprocessor, '_random_integer') as mock_random_integer: mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] return [ distorted_image, distorted_boxes, distorted_labels ] (distorted_image_, distorted_boxes_, distorted_labels_) = self.execute_cpu( graph_fn, []) expected_boxes = np.array([[0.0, 0.0, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) def testRunRandomCropToAspectRatioWithKeypoints(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints, _ = self.createTestKeypoints() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})] with mock.patch.object(preprocessor, '_random_integer') as mock_random_integer: mock_random_integer.return_value = tf.constant(0, dtype=tf.int32) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = 
distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_keypoints = distorted_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return [distorted_image, distorted_boxes, distorted_labels, distorted_keypoints] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_keypoints_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32) expected_keypoints = np.array( [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3]) self.assertAllEqual(distorted_labels_, [1]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllClose(distorted_keypoints_.flatten(), expected_keypoints.flatten()) def testRandomPadToAspectRatioWithCache(self): preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels } preprocessor_arg_map = preprocessor.get_default_func_arg_map() preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {'min_padded_size_ratio': (4.0, 4.0), 'max_padded_size_ratio': (4.0, 4.0)})] distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] return [distorted_image, distorted_boxes, distorted_labels] distorted_image_, distorted_boxes_, distorted_labels_ = self.execute_cpu( graph_fn, []) expected_boxes = np.array( [[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) def testRunRandomPadToAspectRatioWithMasks(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() masks = tf.random_uniform([2, 200, 400], dtype=tf.float32) tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_instance_masks: masks } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True) preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_masks = distorted_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] return [ 
distorted_image, distorted_boxes, distorted_labels, distorted_masks ] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_masks_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array( [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllEqual(distorted_masks_.shape, [2, 400, 400]) def testRunRandomPadToAspectRatioWithKeypoints(self): def graph_fn(): image = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() keypoints, _ = self.createTestKeypoints() tensor_dict = { fields.InputDataFields.image: image, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_keypoints=True) preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})] distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_image = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_labels = distorted_tensor_dict[ fields.InputDataFields.groundtruth_classes] distorted_keypoints = distorted_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return [ distorted_image, distorted_boxes, distorted_labels, distorted_keypoints ] (distorted_image_, distorted_boxes_, distorted_labels_, distorted_keypoints_) = self.execute_cpu(graph_fn, []) expected_boxes = np.array( [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32) expected_keypoints = np.array([ [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]], [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]], ], dtype=np.float32) self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3]) self.assertAllEqual(distorted_labels_, [1, 2]) self.assertAllClose(distorted_boxes_.flatten(), expected_boxes.flatten()) self.assertAllClose(distorted_keypoints_.flatten(), expected_keypoints.flatten()) def testRandomPadImageWithCache(self): preprocess_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1,}), (preprocessor.random_pad_image, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomPadImage(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_pad_image, {})] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) images_shape = tf.shape(images) padded_images_shape = 
tf.shape(padded_images) return [boxes_shape, padded_boxes_shape, images_shape, padded_images_shape, boxes, padded_boxes] (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( padded_boxes_[:, 2] - padded_boxes_[:, 0]))) self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( padded_boxes_[:, 3] - padded_boxes_[:, 1]))) def testRandomPadImageCenterPad(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createColorfulTestImage() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_pad_image, { 'center_pad': True, 'min_image_size': [400, 400], 'max_image_size': [400, 400], })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] padded_labels = padded_tensor_dict[ fields.InputDataFields.groundtruth_classes] return [padded_images, padded_boxes, padded_labels] (padded_images_, padded_boxes_, padded_labels_) = self.execute_cpu( graph_fn, []) expected_boxes = np.array([[0.25, 0.25, 0.625, 1.0], [0.375, 0.5, .625, 1.0]], dtype=np.float32) self.assertAllEqual(padded_images_.shape, [1, 400, 400, 3]) self.assertAllEqual(padded_labels_, [1, 2]) self.assertAllClose(padded_boxes_.flatten(), expected_boxes.flatten()) @parameterized.parameters( {'include_dense_pose': False}, ) def testRandomPadImageWithKeypointsAndMasks(self, include_dense_pose): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() masks = self.createTestMasks() keypoints, _ = self.createTestKeypoints() _, _, dp_surface_coords = self.createTestDensePose() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints, fields.InputDataFields.groundtruth_dp_surface_coords: dp_surface_coords } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_pad_image, {})] func_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True, include_keypoint_visibilities=True, include_dense_pose=include_dense_pose) padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options, 
func_arg_map=func_arg_map) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] padded_masks = padded_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] padded_keypoints = padded_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) padded_masks_shape = tf.shape(padded_masks) keypoints_shape = tf.shape(keypoints) padded_keypoints_shape = tf.shape(padded_keypoints) images_shape = tf.shape(images) padded_images_shape = tf.shape(padded_images) outputs = [boxes_shape, padded_boxes_shape, padded_masks_shape, keypoints_shape, padded_keypoints_shape, images_shape, padded_images_shape, boxes, padded_boxes, keypoints, padded_keypoints] if include_dense_pose: padded_dp_surface_coords = padded_tensor_dict[ fields.InputDataFields.groundtruth_dp_surface_coords] outputs.extend([dp_surface_coords, padded_dp_surface_coords]) return outputs outputs = self.execute_cpu(graph_fn, []) boxes_shape_ = outputs[0] padded_boxes_shape_ = outputs[1] padded_masks_shape_ = outputs[2] keypoints_shape_ = outputs[3] padded_keypoints_shape_ = outputs[4] images_shape_ = outputs[5] padded_images_shape_ = outputs[6] boxes_ = outputs[7] padded_boxes_ = outputs[8] keypoints_ = outputs[9] padded_keypoints_ = outputs[10] self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertAllEqual(keypoints_shape_, padded_keypoints_shape_) self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) self.assertAllEqual(padded_masks_shape_[1:3], padded_images_shape_[1:3]) self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( padded_boxes_[:, 2] - padded_boxes_[:, 0]))) self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( padded_boxes_[:, 3] - padded_boxes_[:, 1]))) self.assertTrue(np.all((keypoints_[1, :, 0] - keypoints_[0, :, 0]) >= ( padded_keypoints_[1, :, 0] - padded_keypoints_[0, :, 0]))) self.assertTrue(np.all((keypoints_[1, :, 1] - keypoints_[0, :, 1]) >= ( padded_keypoints_[1, :, 1] - padded_keypoints_[0, :, 1]))) if include_dense_pose: dp_surface_coords = outputs[11] padded_dp_surface_coords = outputs[12] self.assertAllClose(padded_dp_surface_coords[:, :, 2:], dp_surface_coords[:, :, 2:]) def testRandomAbsolutePadImage(self): height_padding = 10 width_padding = 20 def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } preprocessing_options = [(preprocessor.random_absolute_pad_image, { 'max_height_padding': height_padding, 'max_width_padding': width_padding})] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) original_shape = tf.shape(images) final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) return original_shape, final_shape for _ in range(100): original_shape, output_shape = self.execute_cpu(graph_fn, []) _, height, width, _ = original_shape self.assertGreaterEqual(output_shape[1], height) self.assertLess(output_shape[1], height + height_padding) self.assertGreaterEqual(output_shape[2], width) self.assertLess(output_shape[2], width 
+ width_padding) def testRandomAbsolutePadImageWithKeypoints(self): height_padding = 10 width_padding = 20 def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() keypoints, _ = self.createTestKeypoints() tensor_dict = { fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_keypoints: keypoints, } preprocessing_options = [(preprocessor.random_absolute_pad_image, { 'max_height_padding': height_padding, 'max_width_padding': width_padding })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) original_shape = tf.shape(images) final_shape = tf.shape(padded_tensor_dict[fields.InputDataFields.image]) padded_keypoints = padded_tensor_dict[ fields.InputDataFields.groundtruth_keypoints] return (original_shape, final_shape, padded_keypoints) for _ in range(100): original_shape, output_shape, padded_keypoints_ = self.execute_cpu( graph_fn, []) _, height, width, _ = original_shape self.assertGreaterEqual(output_shape[1], height) self.assertLess(output_shape[1], height + height_padding) self.assertGreaterEqual(output_shape[2], width) self.assertLess(output_shape[2], width + width_padding) # Verify the keypoints are populated. The correctness of the keypoint # coordinates are already tested in random_pad_image function. self.assertEqual(padded_keypoints_.shape, (2, 3, 2)) def testRandomCropPadImageWithCache(self): preprocess_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomCropPadImageWithRandomCoefOne(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_pad_image, { 'random_coef': 1.0 })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) images_shape = tf.shape(images) padded_images_shape = tf.shape(padded_images) return [boxes_shape, padded_boxes_shape, images_shape, padded_images_shape, boxes, padded_boxes] (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_, boxes_, padded_boxes_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) self.assertTrue((images_shape_[2] <= 
padded_images_shape_[2]).all) self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( padded_boxes_[:, 2] - padded_boxes_[:, 0]))) self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( padded_boxes_[:, 3] - padded_boxes_[:, 1]))) def testRandomCropToAspectRatio(self): def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } tensor_dict = preprocessor.preprocess(tensor_dict, []) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, { 'aspect_ratio': 2.0 })] cropped_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) cropped_images = cropped_tensor_dict[fields.InputDataFields.image] cropped_boxes = cropped_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) cropped_boxes_shape = tf.shape(cropped_boxes) images_shape = tf.shape(images) cropped_images_shape = tf.shape(cropped_images) return [ boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape ] (boxes_shape_, cropped_boxes_shape_, images_shape_, cropped_images_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, cropped_boxes_shape_) self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2) self.assertEqual(images_shape_[2], cropped_images_shape_[2]) def testRandomPadToAspectRatio(self): def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, []) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, { 'aspect_ratio': 2.0 })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) images_shape = tf.shape(images) padded_images_shape = tf.shape(padded_images) return [ boxes_shape, padded_boxes_shape, images_shape, padded_images_shape ] (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertEqual(images_shape_[1], padded_images_shape_[1]) self.assertEqual(2 * images_shape_[2], padded_images_shape_[2]) def testRandomBlackPatchesWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_black_patches, { 'size_to_image_ratio': 0.5 })) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomBlackPatches(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_black_patches, { 
'size_to_image_ratio': 0.5 })) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} blacked_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) blacked_images = blacked_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) blacked_images_shape = tf.shape(blacked_images) return [images_shape, blacked_images_shape] (images_shape_, blacked_images_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_, blacked_images_shape_) def testRandomJpegQuality(self): def graph_fn(): preprocessing_options = [(preprocessor.random_jpeg_quality, { 'min_jpeg_quality': 0, 'max_jpeg_quality': 100 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) encoded_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) encoded_images_shape = tf.shape(encoded_images) return [images_shape, encoded_images_shape] images_shape_out, encoded_images_shape_out = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_out, encoded_images_shape_out) def testRandomJpegQualityKeepsStaticChannelShape(self): # Set at least three weeks past the forward compatibility horizon for # tf 1.14 of 2019/11/01. # https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/compat/compat.py#L30 if not tf.compat.forward_compatible(year=2019, month=12, day=1): self.skipTest('Skipping test for future functionality.') preprocessing_options = [(preprocessor.random_jpeg_quality, { 'min_jpeg_quality': 0, 'max_jpeg_quality': 100 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) encoded_images = processed_tensor_dict[fields.InputDataFields.image] images_static_channels = images.shape[-1] encoded_images_static_channels = encoded_images.shape[-1] self.assertEqual(images_static_channels, encoded_images_static_channels) def testRandomJpegQualityWithCache(self): preprocessing_options = [(preprocessor.random_jpeg_quality, { 'min_jpeg_quality': 0, 'max_jpeg_quality': 100 })] self._testPreprocessorCache(preprocessing_options) def testRandomJpegQualityWithRandomCoefOne(self): def graph_fn(): preprocessing_options = [(preprocessor.random_jpeg_quality, { 'random_coef': 1.0 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) encoded_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) encoded_images_shape = tf.shape(encoded_images) return [images, encoded_images, images_shape, encoded_images_shape] (images_out, encoded_images_out, images_shape_out, encoded_images_shape_out) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_out, encoded_images_shape_out) self.assertAllEqual(images_out, encoded_images_out) def testRandomDownscaleToTargetPixels(self): def graph_fn(): preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { 'min_target_pixels': 100, 'max_target_pixels': 101 })] images = tf.random_uniform([1, 25, 100, 3]) tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) downscaled_images = processed_tensor_dict[fields.InputDataFields.image] downscaled_shape = tf.shape(downscaled_images) return 
downscaled_shape expected_shape = [1, 5, 20, 3] downscaled_shape_out = self.execute_cpu(graph_fn, []) self.assertAllEqual(downscaled_shape_out, expected_shape) def testRandomDownscaleToTargetPixelsWithMasks(self): def graph_fn(): preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { 'min_target_pixels': 100, 'max_target_pixels': 101 })] images = tf.random_uniform([1, 25, 100, 3]) masks = tf.random_uniform([10, 25, 100]) tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_instance_masks: masks } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True) processed_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) downscaled_images = processed_tensor_dict[fields.InputDataFields.image] downscaled_masks = processed_tensor_dict[ fields.InputDataFields.groundtruth_instance_masks] downscaled_images_shape = tf.shape(downscaled_images) downscaled_masks_shape = tf.shape(downscaled_masks) return [downscaled_images_shape, downscaled_masks_shape] expected_images_shape = [1, 5, 20, 3] expected_masks_shape = [10, 5, 20] (downscaled_images_shape_out, downscaled_masks_shape_out) = self.execute_cpu(graph_fn, []) self.assertAllEqual(downscaled_images_shape_out, expected_images_shape) self.assertAllEqual(downscaled_masks_shape_out, expected_masks_shape) @parameterized.parameters( {'test_masks': False}, {'test_masks': True} ) def testRandomDownscaleToTargetPixelsWithCache(self, test_masks): preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { 'min_target_pixels': 100, 'max_target_pixels': 999 })] self._testPreprocessorCache(preprocessing_options, test_masks=test_masks) def testRandomDownscaleToTargetPixelsWithRandomCoefOne(self): def graph_fn(): preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { 'random_coef': 1.0, 'min_target_pixels': 10, 'max_target_pixels': 20, })] images = tf.random_uniform([1, 25, 100, 3]) tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) downscaled_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) downscaled_images_shape = tf.shape(downscaled_images) return [images, downscaled_images, images_shape, downscaled_images_shape] (images_out, downscaled_images_out, images_shape_out, downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_out, downscaled_images_shape_out) self.assertAllEqual(images_out, downscaled_images_out) def testRandomDownscaleToTargetPixelsIgnoresSmallImages(self): def graph_fn(): preprocessing_options = [(preprocessor.random_downscale_to_target_pixels, { 'min_target_pixels': 1000, 'max_target_pixels': 1001 })] images = tf.random_uniform([1, 10, 10, 3]) tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) downscaled_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) downscaled_images_shape = tf.shape(downscaled_images) return [images, downscaled_images, images_shape, downscaled_images_shape] (images_out, downscaled_images_out, images_shape_out, downscaled_images_shape_out) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_out, downscaled_images_shape_out) self.assertAllEqual(images_out, downscaled_images_out) def testRandomPatchGaussianShape(self): preprocessing_options = 
[(preprocessor.random_patch_gaussian, { 'min_patch_size': 1, 'max_patch_size': 200, 'min_gaussian_stddev': 0.0, 'max_gaussian_stddev': 2.0 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) patched_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) patched_images_shape = tf.shape(patched_images) self.assertAllEqual(images_shape, patched_images_shape) def testRandomPatchGaussianClippedToLowerBound(self): def graph_fn(): preprocessing_options = [(preprocessor.random_patch_gaussian, { 'min_patch_size': 20, 'max_patch_size': 40, 'min_gaussian_stddev': 50, 'max_gaussian_stddev': 100 })] images = tf.zeros([1, 5, 4, 3]) tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) patched_images = processed_tensor_dict[fields.InputDataFields.image] return patched_images patched_images = self.execute_cpu(graph_fn, []) self.assertAllGreaterEqual(patched_images, 0.0) def testRandomPatchGaussianClippedToUpperBound(self): def graph_fn(): preprocessing_options = [(preprocessor.random_patch_gaussian, { 'min_patch_size': 20, 'max_patch_size': 40, 'min_gaussian_stddev': 50, 'max_gaussian_stddev': 100 })] images = tf.constant(255.0, shape=[1, 5, 4, 3]) tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) patched_images = processed_tensor_dict[fields.InputDataFields.image] return patched_images patched_images = self.execute_cpu(graph_fn, []) self.assertAllLessEqual(patched_images, 255.0) def testRandomPatchGaussianWithCache(self): preprocessing_options = [(preprocessor.random_patch_gaussian, { 'min_patch_size': 1, 'max_patch_size': 200, 'min_gaussian_stddev': 0.0, 'max_gaussian_stddev': 2.0 })] self._testPreprocessorCache(preprocessing_options) def testRandomPatchGaussianWithRandomCoefOne(self): def graph_fn(): preprocessing_options = [(preprocessor.random_patch_gaussian, { 'random_coef': 1.0 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} processed_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) patched_images = processed_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) patched_images_shape = tf.shape(patched_images) return patched_images_shape, patched_images, images_shape, images (patched_images_shape, patched_images, images_shape, images) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape, patched_images_shape) self.assertAllEqual(images, patched_images) @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') def testAutoAugmentImage(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.autoaugment_image, { 'policy_name': 'v1' })) images = self.createTestImages() boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} autoaugment_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options) augmented_images = autoaugment_tensor_dict[fields.InputDataFields.image] augmented_boxes = autoaugment_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_shape = tf.shape(images) boxes_shape = tf.shape(boxes) augmented_images_shape = tf.shape(augmented_images) augmented_boxes_shape = tf.shape(augmented_boxes) return [images_shape, boxes_shape, 
augmented_images_shape, augmented_boxes_shape] (images_shape_, boxes_shape_, augmented_images_shape_, augmented_boxes_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(images_shape_, augmented_images_shape_) self.assertAllEqual(boxes_shape_, augmented_boxes_shape_) def testRandomResizeMethodWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_resize_method, { 'target_size': (75, 150) })) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomResizeMethod(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_resize_method, { 'target_size': (75, 150) })) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} resized_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) resized_images = resized_tensor_dict[fields.InputDataFields.image] resized_images_shape = tf.shape(resized_images) expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32) return [expected_images_shape, resized_images_shape] (expected_images_shape_, resized_images_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(expected_images_shape_, resized_images_shape_) def testResizeImageWithMasks(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] height = 50 width = 100 expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return out_image_shape, out_masks_shape for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): (out_image_shape, out_masks_shape) = self.execute_cpu(graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeImageWithMasksTensorInputHeightAndWidth(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] def graph_fn(in_image_shape, in_masks_shape): height = tf.constant(50, dtype=tf.int32) width = tf.constant(100, dtype=tf.int32) in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return out_image_shape, out_masks_shape for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in 
zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): (out_image_shape, out_masks_shape) = self.execute_cpu(graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeImageWithNoInstanceMask(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] height = 50 width = 100 expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return out_image_shape, out_masks_shape for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): (out_image_shape, out_masks_shape) = self.execute_cpu(graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangePreservesStaticSpatialShape(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): in_image = tf.random_uniform(in_shape) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) self.assertAllEqual(out_image.get_shape().as_list(), expected_shape) def testResizeToRangeWithDynamicSpatialShape(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] def graph_fn(in_image_shape): in_image = tf.random_uniform(in_image_shape) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) return out_image_shape for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, np.int32)]) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self): in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]] def graph_fn(in_image): out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True) return tf.shape(out_image) for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): out_image_shape = self.execute_cpu( graph_fn, [np.random.rand(*in_shape).astype('f')]) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self): in_image_np = np.array([[[0, 1, 2]]], np.float32) ex_image_np = np.array( [[[0, 1, 2], [123.68, 116.779, 103.939]], [[123.68, 116.779, 103.939], [123.68, 
116.779, 103.939]]], np.float32) min_dim = 1 max_dim = 2 def graph_fn(in_image): out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True, per_channel_pad_value=(123.68, 116.779, 103.939)) return out_image out_image_np = self.execute_cpu(graph_fn, [in_image_np]) self.assertAllClose(ex_image_np, out_image_np) def testResizeToRangeWithMasksPreservesStaticSpatialShape(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape) self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape) def testResizeToRangeWithMasksAndPadToMaxDimension(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[100, 100, 3], [100, 100, 3]] expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]] def graph_fn(in_image, in_masks): out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.random.rand(*in_image_shape).astype('f'), np.random.rand(*in_masks_shape).astype('f'), ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangeWithMasksAndDynamicSpatialShape(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] def graph_fn(in_image, in_masks): out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.random.rand(*in_image_shape).astype('f'), np.random.rand(*in_masks_shape).astype('f'), ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self): """Tests 
image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] def graph_fn(in_image, in_masks): out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.random.rand(*in_image_shape).astype('f'), np.random.rand(*in_masks_shape).astype('f'), ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRange4DImageTensor(self): image = tf.random_uniform([1, 200, 300, 3]) with self.assertRaises(ValueError): preprocessor.resize_to_range(image, 500, 600) def testResizeToRangeSameMinMax(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[312, 312, 3], [299, 299, 3]] min_dim = 320 max_dim = 320 expected_shape_list = [[320, 320, 3], [320, 320, 3]] def graph_fn(in_shape): in_image = tf.random_uniform(in_shape) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) return out_image_shape for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): out_image_shape = self.execute_cpu(graph_fn, [np.array(in_shape, np.int32)]) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToMaxDimensionTensorShapes(self): """Tests both cases where image should and shouldn't be resized.""" in_image_shape_list = [[100, 50, 3], [15, 30, 3]] in_masks_shape_list = [[15, 100, 50], [10, 15, 30]] max_dim = 50 expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] expected_masks_shape_list = [[15, 50, 25], [10, 15, 30]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_max_dimension( in_image, in_masks, max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMaxDimensionWithInstanceMasksTensorOfSizeZero(self): """Tests both cases where image should and shouldn't be resized.""" in_image_shape_list = [[100, 50, 3], [15, 30, 3]] in_masks_shape_list = [[0, 100, 50], [0, 15, 30]] max_dim = 50 expected_image_shape_list = [[50, 25, 3], [15, 30, 3]] expected_masks_shape_list = [[0, 50, 25], [0, 15, 30]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_max_dimension( in_image, in_masks, 
max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMaxDimensionRaisesErrorOn4DImage(self): image = tf.random_uniform([1, 200, 300, 3]) with self.assertRaises(ValueError): preprocessor.resize_to_max_dimension(image, 500) def testResizeToMinDimensionTensorShapes(self): in_image_shape_list = [[60, 55, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 55], [10, 15, 30]] min_dim = 50 expected_image_shape_list = [[60, 55, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_min_dimension( in_image, in_masks, min_dimension=min_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] min_dim = 50 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] def graph_fn(in_image_shape, in_masks_shape): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_min_dimension( in_image, in_masks, min_dimension=min_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) return [out_image_shape, out_masks_shape] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): out_image_shape, out_masks_shape = self.execute_cpu( graph_fn, [ np.array(in_image_shape, np.int32), np.array(in_masks_shape, np.int32) ]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMinDimensionRaisesErrorOn4DImage(self): image = tf.random_uniform([1, 200, 300, 3]) with self.assertRaises(ValueError): preprocessor.resize_to_min_dimension(image, 500) def testResizePadToMultipleNoMasks(self): """Tests resizing when padding to multiple without masks.""" def graph_fn(): image = tf.ones((200, 100, 3), dtype=tf.float32) out_image, out_shape = preprocessor.resize_pad_to_multiple( image, multiple=32) return out_image, out_shape out_image, out_shape = self.execute_cpu(graph_fn, []) self.assertAllClose(out_image.sum(), 200 * 100 * 3) 
self.assertAllEqual(out_shape, (200, 100, 3)) self.assertAllEqual(out_image.shape, (224, 128, 3)) def testResizePadToMultipleWithMasks(self): """Tests resizing when padding to multiple with masks.""" def graph_fn(): image = tf.ones((200, 100, 3), dtype=tf.float32) masks = tf.ones((10, 200, 100), dtype=tf.float32) _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( image, multiple=32, masks=masks) return [out_masks, out_shape] out_masks, out_shape = self.execute_cpu(graph_fn, []) self.assertAllClose(out_masks.sum(), 200 * 100 * 10) self.assertAllEqual(out_shape, (200, 100, 3)) self.assertAllEqual(out_masks.shape, (10, 224, 128)) def testResizePadToMultipleEmptyMasks(self): """Tests resizing when padding to multiple with an empty mask.""" def graph_fn(): image = tf.ones((200, 100, 3), dtype=tf.float32) masks = tf.ones((0, 200, 100), dtype=tf.float32) _, out_masks, out_shape = preprocessor.resize_pad_to_multiple( image, multiple=32, masks=masks) return [out_masks, out_shape] out_masks, out_shape = self.execute_cpu(graph_fn, []) self.assertAllEqual(out_shape, (200, 100, 3)) self.assertAllEqual(out_masks.shape, (0, 224, 128)) def testScaleBoxesToPixelCoordinates(self): """Tests box scaling, checking scaled values.""" def graph_fn(): in_shape = [60, 40, 3] in_boxes = [[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]] in_image = tf.random_uniform(in_shape) in_boxes = tf.constant(in_boxes) _, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates( in_image, boxes=in_boxes) return out_boxes expected_boxes = [[6., 8., 24., 24.], [30., 12., 54., 28.]] out_boxes = self.execute_cpu(graph_fn, []) self.assertAllClose(out_boxes, expected_boxes) def testScaleBoxesToPixelCoordinatesWithKeypoints(self): """Tests box and keypoint scaling, checking scaled values.""" def graph_fn(): in_shape = [60, 40, 3] in_boxes = self.createTestBoxes() in_keypoints, _ = self.createTestKeypoints() in_image = tf.random_uniform(in_shape) (_, out_boxes, out_keypoints) = preprocessor.scale_boxes_to_pixel_coordinates( in_image, boxes=in_boxes, keypoints=in_keypoints) return out_boxes, out_keypoints expected_boxes = [[0., 10., 45., 40.], [15., 20., 45., 40.]] expected_keypoints = [ [[6., 4.], [12., 8.], [18., 12.]], [[24., 16.], [30., 20.], [36., 24.]], ] out_boxes_, out_keypoints_ = self.execute_cpu(graph_fn, []) self.assertAllClose(out_boxes_, expected_boxes) self.assertAllClose(out_keypoints_, expected_keypoints) def testSubtractChannelMean(self): """Tests whether channel means have been subtracted.""" def graph_fn(): image = tf.zeros((240, 320, 3)) means = [1, 2, 3] actual = preprocessor.subtract_channel_mean(image, means=means) return actual actual = self.execute_cpu(graph_fn, []) self.assertTrue((actual[:, :, 0], -1)) self.assertTrue((actual[:, :, 1], -2)) self.assertTrue((actual[:, :, 2], -3)) def testOneHotEncoding(self): """Tests one hot encoding of multiclass labels.""" def graph_fn(): labels = tf.constant([1, 4, 2], dtype=tf.int32) one_hot = preprocessor.one_hot_encoding(labels, num_classes=5) return one_hot one_hot = self.execute_cpu(graph_fn, []) self.assertAllEqual([0, 1, 1, 0, 1], one_hot) def testRandomSelfConcatImageVertically(self): def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() confidences = weights scores = self.createTestMultiClassScores() tensor_dict = { fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), fields.InputDataFields.groundtruth_boxes: boxes, 
fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_confidences: confidences, fields.InputDataFields.multiclass_scores: scores, } preprocessing_options = [(preprocessor.random_self_concat_image, { 'concat_vertical_probability': 1.0, 'concat_horizontal_probability': 0.0, })] func_arg_map = preprocessor.get_default_func_arg_map( True, True, True) output_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=func_arg_map) original_shape = tf.shape(images)[1:3] final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ 1:3] return [ original_shape, boxes, labels, confidences, scores, final_shape, output_tensor_dict[fields.InputDataFields.groundtruth_boxes], output_tensor_dict[fields.InputDataFields.groundtruth_classes], output_tensor_dict[fields.InputDataFields.groundtruth_confidences], output_tensor_dict[fields.InputDataFields.multiclass_scores], ] (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) self.assertAllEqual(final_shape, original_shape * np.array([2, 1])) self.assertAllEqual(2 * boxes.size, new_boxes.size) self.assertAllEqual(2 * labels.size, new_labels.size) self.assertAllEqual(2 * confidences.size, new_confidences.size) self.assertAllEqual(2 * scores.size, new_scores.size) def testRandomSelfConcatImageHorizontally(self): def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() confidences = weights scores = self.createTestMultiClassScores() tensor_dict = { fields.InputDataFields.image: tf.cast(images, dtype=tf.float32), fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, fields.InputDataFields.groundtruth_confidences: confidences, fields.InputDataFields.multiclass_scores: scores, } preprocessing_options = [(preprocessor.random_self_concat_image, { 'concat_vertical_probability': 0.0, 'concat_horizontal_probability': 1.0, })] func_arg_map = preprocessor.get_default_func_arg_map( True, True, True) output_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=func_arg_map) original_shape = tf.shape(images)[1:3] final_shape = tf.shape(output_tensor_dict[fields.InputDataFields.image])[ 1:3] return [ original_shape, boxes, labels, confidences, scores, final_shape, output_tensor_dict[fields.InputDataFields.groundtruth_boxes], output_tensor_dict[fields.InputDataFields.groundtruth_classes], output_tensor_dict[fields.InputDataFields.groundtruth_confidences], output_tensor_dict[fields.InputDataFields.multiclass_scores], ] (original_shape, boxes, labels, confidences, scores, final_shape, new_boxes, new_labels, new_confidences, new_scores) = self.execute(graph_fn, []) self.assertAllEqual(final_shape, original_shape * np.array([1, 2])) self.assertAllEqual(2 * boxes.size, new_boxes.size) self.assertAllEqual(2 * labels.size, new_labels.size) self.assertAllEqual(2 * confidences.size, new_confidences.size) self.assertAllEqual(2 * scores.size, new_scores.size) def testSSDRandomCropWithCache(self): preprocess_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, 
test_masks=False, test_keypoints=False) def testSSDRandomCrop(self): def graph_fn(): preprocessing_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) return [boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropWithMultiClassScores(self): def graph_fn(): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.multiclass_scores: multiclass_scores, fields.InputDataFields.groundtruth_weights: weights, } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_multiclass_scores=True) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_multiclass_scores = distorted_tensor_dict[ fields.InputDataFields.multiclass_scores] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) multiclass_scores_rank = tf.rank(multiclass_scores) distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) return [ boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, distorted_images_rank, multiclass_scores_rank, distorted_multiclass_scores, distorted_multiclass_scores_rank ] (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, distorted_images_rank_, multiclass_scores_rank_, distorted_multiclass_scores_, distorted_multiclass_scores_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) self.assertAllEqual(multiclass_scores_rank_, distorted_multiclass_scores_rank_) self.assertAllEqual(distorted_boxes_.shape[0], distorted_multiclass_scores_.shape[0]) def testSSDRandomCropPad(self): def graph_fn(): images = self.createTestImages() boxes = 
self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() preprocessing_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_pad, {})] tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) return [ boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank ] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropFixedAspectRatioWithCache(self): preprocess_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def _testSSDRandomCropFixedAspectRatio(self, include_multiclass_scores, include_instance_masks, include_keypoints): def graph_fn(): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights } if include_multiclass_scores: multiclass_scores = self.createTestMultiClassScores() tensor_dict[fields.InputDataFields.multiclass_scores] = ( multiclass_scores) if include_instance_masks: masks = self.createTestMasks() tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks if include_keypoints: keypoints, _ = self.createTestKeypoints() tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_keypoints=include_keypoints) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) return [boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank] (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) 
self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropFixedAspectRatio(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=False, include_keypoints=False) def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True, include_instance_masks=False, include_keypoints=False) def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=True, include_keypoints=True) def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=True, include_keypoints=True) def testConvertClassLogitsToSoftmax(self): def graph_fn(): multiclass_scores = tf.constant( [[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32) temperature = 2.0 converted_multiclass_scores = ( preprocessor.convert_class_logits_to_softmax( multiclass_scores=multiclass_scores, temperature=temperature)) return converted_multiclass_scores converted_multiclass_scores_ = self.execute_cpu(graph_fn, []) expected_converted_multiclass_scores = [[0.62245935, 0.37754068], [0.5, 0.5], [1, 0]] self.assertAllClose(converted_multiclass_scores_, expected_converted_multiclass_scores) @parameterized.named_parameters( ('scale_1', 1.0), ('scale_1.5', 1.5), ('scale_0.5', 0.5) ) def test_square_crop_by_scale(self, scale): def graph_fn(): image = np.random.randn(256, 256, 1) masks = tf.constant(image[:, :, 0].reshape(1, 256, 256)) image = tf.constant(image) keypoints = tf.constant([[[0.25, 0.25], [0.75, 0.75]]]) boxes = tf.constant([[0.25, .25, .75, .75]]) labels = tf.constant([[1]]) label_confidences = tf.constant([0.75]) label_weights = tf.constant([[1.]]) (new_image, new_boxes, _, _, new_confidences, new_masks, new_keypoints) = preprocessor.random_square_crop_by_scale( image, boxes, labels, label_weights, label_confidences, masks=masks, keypoints=keypoints, max_border=256, scale_min=scale, scale_max=scale) return new_image, new_boxes, new_confidences, new_masks, new_keypoints image, boxes, confidences, masks, keypoints = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[0] self.assertAlmostEqual(ymax - ymin, 0.5 / scale) self.assertAlmostEqual(xmax - xmin, 0.5 / scale) k1 = keypoints[0, 0] k2 = keypoints[0, 1] self.assertAlmostEqual(k2[0] - k1[0], 0.5 / scale) self.assertAlmostEqual(k2[1] - k1[1], 0.5 / scale) size = max(image.shape) self.assertAlmostEqual(scale * 256.0, size) self.assertAllClose(image[:, :, 0], masks[0, :, :]) self.assertAllClose(confidences, [0.75]) @parameterized.named_parameters(('scale_0_1', 0.1), ('scale_1_0', 1.0), ('scale_2_0', 2.0)) def test_random_scale_crop_and_pad_to_square(self, scale): def graph_fn(): image = np.random.randn(512, 256, 1) box_centers = [0.25, 0.5, 0.75] box_size = 0.1 box_corners = [] box_labels = [] box_label_weights = [] keypoints = [] masks = [] for center_y in box_centers: for center_x in box_centers: box_corners.append( [center_y - box_size / 2.0, center_x - box_size / 2.0, center_y + box_size / 2.0, center_x + box_size / 2.0]) box_labels.append([1]) box_label_weights.append([1.]) keypoints.append( [[center_y - box_size / 2.0, center_x - box_size / 2.0], [center_y + box_size / 2.0, center_x + box_size / 2.0]]) masks.append(image[:, :, 0].reshape(512, 256)) image = tf.constant(image) boxes = tf.constant(box_corners) labels = 
tf.constant(box_labels) label_weights = tf.constant(box_label_weights) keypoints = tf.constant(keypoints) masks = tf.constant(np.stack(masks)) (new_image, new_boxes, _, _, new_masks, new_keypoints) = preprocessor.random_scale_crop_and_pad_to_square( image, boxes, labels, label_weights, masks=masks, keypoints=keypoints, scale_min=scale, scale_max=scale, output_size=512) return new_image, new_boxes, new_masks, new_keypoints image, boxes, masks, keypoints = self.execute_cpu(graph_fn, []) # Since random_scale_crop_and_pad_to_square may prune and clip boxes, # we only need to find one of the boxes that was not clipped and check # that it matches the expected dimensions. Note, assertAlmostEqual(a, b) # is equivalent to round(a-b, 7) == 0. any_box_has_correct_size = False effective_scale_y = int(scale * 512) / 512.0 effective_scale_x = int(scale * 256) / 512.0 expected_size_y = 0.1 * effective_scale_y expected_size_x = 0.1 * effective_scale_x for box in boxes: ymin, xmin, ymax, xmax = box any_box_has_correct_size |= ( (round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and (round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and (round((ymax - ymin) - expected_size_y, 7) == 0.0) and (round((xmax - xmin) - expected_size_x, 7) == 0.0)) self.assertTrue(any_box_has_correct_size) # Similar to the approach above where we check for at least one box with the # expected dimensions, we check for at least one pair of keypoints whose # distance matches the expected dimensions. any_keypoint_pair_has_correct_dist = False for keypoint_pair in keypoints: ymin, xmin = keypoint_pair[0] ymax, xmax = keypoint_pair[1] any_keypoint_pair_has_correct_dist |= ( (round(ymin, 7) != 0.0) and (round(xmin, 7) != 0.0) and (round(ymax, 7) != 1.0) and (round(xmax, 7) != 1.0) and (round((ymax - ymin) - expected_size_y, 7) == 0.0) and (round((xmax - xmin) - expected_size_x, 7) == 0.0)) self.assertTrue(any_keypoint_pair_has_correct_dist) self.assertAlmostEqual(512.0, image.shape[0]) self.assertAlmostEqual(512.0, image.shape[1]) self.assertAllClose(image[:, :, 0], masks[0, :, :]) def test_random_scale_crop_and_pad_to_square_handles_confidences(self): def graph_fn(): image = tf.zeros([10, 10, 1]) boxes = tf.constant([[0, 0, 0.5, 0.5], [0.5, 0.5, 0.75, 0.75]]) label_weights = tf.constant([1.0, 1.0]) box_labels = tf.constant([0, 1]) box_confidences = tf.constant([-1.0, 1.0]) (_, new_boxes, _, _, new_confidences) = preprocessor.random_scale_crop_and_pad_to_square( image, boxes, box_labels, label_weights, label_confidences=box_confidences, scale_min=0.8, scale_max=0.9, output_size=10) return new_boxes, new_confidences boxes, confidences = self.execute_cpu(graph_fn, []) self.assertLen(boxes, 2) self.assertAllEqual(confidences, [-1.0, 1.0]) def testAdjustGamma(self): def graph_fn(): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.adjust_gamma, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_gamma = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_gamma_shape = tf.shape(images_gamma) return [image_original_shape, image_gamma_shape] (image_original_shape_, image_gamma_shape_) = self.execute_cpu(graph_fn, []) self.assertAllEqual(image_original_shape_, image_gamma_shape_) if __name__ == 
'__main__': tf.test.main()
192,819
43.042942
98
py
models
models-master/research/object_detection/core/box_list_ops_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.core.box_list_ops."""
import numpy as np
import tensorflow.compat.v1 as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import test_case


class BoxListOpsTest(test_case.TestCase):
  """Tests for common bounding box operations."""

  def test_area(self):
    def graph_fn():
      corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
      boxes = box_list.BoxList(corners)
      areas = box_list_ops.area(boxes)
      return areas
    areas_out = self.execute(graph_fn, [])
    exp_output = [200.0, 4.0]
    self.assertAllClose(areas_out, exp_output)

  def test_height_width(self):
    def graph_fn():
      corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
      boxes = box_list.BoxList(corners)
      return box_list_ops.height_width(boxes)
    heights_out, widths_out = self.execute(graph_fn, [])
    exp_output_heights = [10., 2.]
    exp_output_widths = [20., 2.]
    self.assertAllClose(heights_out, exp_output_heights)
    self.assertAllClose(widths_out, exp_output_widths)

  def test_scale(self):
    def graph_fn():
      corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                            dtype=tf.float32)
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2]]))
      y_scale = tf.constant(1.0/100)
      x_scale = tf.constant(1.0/200)
      scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
      return scaled_boxes.get(), scaled_boxes.get_field('extra_data')
    scaled_corners_out, extra_data_out = self.execute(graph_fn, [])
    exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
    self.assertAllClose(scaled_corners_out, exp_output)
    self.assertAllEqual(extra_data_out, [[1], [2]])

  def test_scale_height_width(self):
    def graph_fn():
      corners = tf.constant([[-10, -20, 10, 20], [0, 100, 100, 200]],
                            dtype=tf.float32)
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2]]))
      y_scale = tf.constant(2.)
      x_scale = tf.constant(0.5)
      scaled_boxes = box_list_ops.scale_height_width(boxes, y_scale, x_scale)
      return scaled_boxes.get(), scaled_boxes.get_field('extra_data')
    exp_output = [
        [-20., -10, 20., 10],
        [-50., 125, 150., 175.]]
    scaled_corners_out, extra_data_out = self.execute(graph_fn, [])
    self.assertAllClose(scaled_corners_out, exp_output)
    self.assertAllEqual(extra_data_out, [[1], [2]])

  def test_clip_to_window_filter_boxes_which_fall_outside_the_window(
      self):
    def graph_fn():
      window = tf.constant([0, 0, 9, 14], tf.float32)
      corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
                             [-1.0, -2.0, 4.0, 5.0],
                             [2.0, 3.0, 5.0, 9.0],
                             [0.0, 0.0, 9.0, 14.0],
                             [-100.0, -100.0, 300.0, 600.0],
                             [-10.0, -10.0, -9.0, -9.0]])
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
      pruned = box_list_ops.clip_to_window(
          boxes, window, filter_nonoverlapping=True)
      return pruned.get(), pruned.get_field('extra_data')
    exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
                  [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
                  [0.0, 0.0, 9.0, 14.0]]
    pruned_output, extra_data_out = self.execute_cpu(graph_fn, [])
    self.assertAllClose(pruned_output, exp_output)
    self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5]])

  def test_clip_to_window_without_filtering_boxes_which_fall_outside_the_window(
      self):
    def graph_fn():
      window = tf.constant([0, 0, 9, 14], tf.float32)
      corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
                             [-1.0, -2.0, 4.0, 5.0],
                             [2.0, 3.0, 5.0, 9.0],
                             [0.0, 0.0, 9.0, 14.0],
                             [-100.0, -100.0, 300.0, 600.0],
                             [-10.0, -10.0, -9.0, -9.0]])
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
      pruned = box_list_ops.clip_to_window(
          boxes, window, filter_nonoverlapping=False)
      return pruned.get(), pruned.get_field('extra_data')
    pruned_output, extra_data_out = self.execute(graph_fn, [])
    exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
                  [2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
                  [0.0, 0.0, 9.0, 14.0], [0.0, 0.0, 0.0, 0.0]]
    self.assertAllClose(pruned_output, exp_output)
    self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5], [6]])

  def test_prune_outside_window_filters_boxes_which_fall_outside_the_window(
      self):
    def graph_fn():
      window = tf.constant([0, 0, 9, 14], tf.float32)
      corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
                             [-1.0, -2.0, 4.0, 5.0],
                             [2.0, 3.0, 5.0, 9.0],
                             [0.0, 0.0, 9.0, 14.0],
                             [-10.0, -10.0, -9.0, -9.0],
                             [-100.0, -100.0, 300.0, 600.0]])
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
      pruned, keep_indices = box_list_ops.prune_outside_window(boxes, window)
      return pruned.get(), pruned.get_field('extra_data'), keep_indices
    pruned_output, extra_data_out, keep_indices_out = self.execute_cpu(
        graph_fn, [])
    exp_output = [[5.0, 5.0, 6.0, 6.0],
                  [2.0, 3.0, 5.0, 9.0],
                  [0.0, 0.0, 9.0, 14.0]]
    self.assertAllClose(pruned_output, exp_output)
    self.assertAllEqual(keep_indices_out, [0, 2, 3])
    self.assertAllEqual(extra_data_out, [[1], [3], [4]])

  def test_prune_completely_outside_window(self):
    def graph_fn():
      window = tf.constant([0, 0, 9, 14], tf.float32)
      corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
                             [-1.0, -2.0, 4.0, 5.0],
                             [2.0, 3.0, 5.0, 9.0],
                             [0.0, 0.0, 9.0, 14.0],
                             [-10.0, -10.0, -9.0, -9.0],
                             [-100.0, -100.0, 300.0, 600.0]])
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
      pruned, keep_indices = box_list_ops.prune_completely_outside_window(
          boxes, window)
      return pruned.get(), pruned.get_field('extra_data'), keep_indices
    pruned_output, extra_data_out, keep_indices_out = self.execute(graph_fn, [])
    exp_output = [[5.0, 5.0, 6.0, 6.0],
                  [-1.0, -2.0, 4.0, 5.0],
                  [2.0, 3.0, 5.0, 9.0],
                  [0.0, 0.0, 9.0, 14.0],
                  [-100.0, -100.0, 300.0, 600.0]]
    self.assertAllClose(pruned_output, exp_output)
    self.assertAllEqual(keep_indices_out, [0, 1, 2, 3, 5])
    self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [6]])

  def test_prune_completely_outside_window_with_empty_boxlist(self):
    def graph_fn():
      window = tf.constant([0, 0, 9, 14], tf.float32)
      corners = tf.zeros(shape=[0, 4], dtype=tf.float32)
      boxes = box_list.BoxList(corners)
      boxes.add_field('extra_data', tf.zeros(shape=[0], dtype=tf.int32))
      pruned, keep_indices = box_list_ops.prune_completely_outside_window(
          boxes, window)
      pruned_boxes = pruned.get()
      extra = pruned.get_field('extra_data')
      return pruned_boxes, extra, keep_indices
    pruned_boxes_out, extra_out, keep_indices_out = self.execute(graph_fn, [])
    exp_pruned_boxes = np.zeros(shape=[0, 4], dtype=np.float32)
    exp_extra = np.zeros(shape=[0], dtype=np.int32)
    self.assertAllClose(exp_pruned_boxes, pruned_boxes_out)
    self.assertAllEqual([], keep_indices_out)
    self.assertAllEqual(exp_extra, extra_out)

  def test_intersection(self):
    def graph_fn():
      corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
      corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
                              [0.0, 0.0, 20.0, 20.0]])
      boxes1 = box_list.BoxList(corners1)
      boxes2 = box_list.BoxList(corners2)
      intersect = box_list_ops.intersection(boxes1, boxes2)
      return intersect
    exp_output = [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]]
    intersect_out = self.execute(graph_fn, [])
    self.assertAllClose(intersect_out, exp_output)

  def test_matched_intersection(self):
    def graph_fn():
      corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
      corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
      boxes1 = box_list.BoxList(corners1)
      boxes2 = box_list.BoxList(corners2)
      intersect = box_list_ops.matched_intersection(boxes1, boxes2)
      return intersect
    exp_output = [2.0, 0.0]
    intersect_out = self.execute(graph_fn, [])
    self.assertAllClose(intersect_out, exp_output)

  def test_iou(self):
    def graph_fn():
      corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
      corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
                              [0.0, 0.0, 20.0, 20.0]])
      boxes1 = box_list.BoxList(corners1)
      boxes2 = box_list.BoxList(corners2)
      iou = box_list_ops.iou(boxes1, boxes2)
      return iou
    exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]]
    iou_output = self.execute(graph_fn, [])
    self.assertAllClose(iou_output, exp_output)

  def test_l1(self):
    def graph_fn():
      corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
      corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
                              [0.0, 0.0, 20.0, 20.0]])
      boxes1 = box_list.BoxList(corners1)
      boxes2 = box_list.BoxList(corners2)
      l1 = box_list_ops.l1(boxes1, boxes2)
      return l1
    exp_output = [[5.0, 22.5, 45.5], [8.5, 19.0, 40.0]]
    l1_output = self.execute(graph_fn, [])
    self.assertAllClose(l1_output, exp_output)

  def test_giou(self):
    def graph_fn():
      corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]])
      corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]])
      boxes1 = box_list.BoxList(corners1)
      boxes2 = box_list.BoxList(corners2)
      giou = box_list_ops.giou(boxes1, boxes2)
      return giou
    exp_output = [[1.0, -1.0 / 3.0]]
    giou_output = self.execute(graph_fn, [])
    self.assertAllClose(giou_output, exp_output)
def test_matched_iou(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) iou = box_list_ops.matched_iou(boxes1, boxes2) return iou exp_output = [2.0 / 16.0, 0] iou_output = self.execute(graph_fn, []) self.assertAllClose(iou_output, exp_output) def test_iouworks_on_empty_inputs(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) boxes_empty = box_list.BoxList(tf.zeros((0, 4))) iou_empty_1 = box_list_ops.iou(boxes1, boxes_empty) iou_empty_2 = box_list_ops.iou(boxes_empty, boxes2) iou_empty_3 = box_list_ops.iou(boxes_empty, boxes_empty) return iou_empty_1, iou_empty_2, iou_empty_3 iou_output_1, iou_output_2, iou_output_3 = self.execute(graph_fn, []) self.assertAllEqual(iou_output_1.shape, (2, 0)) self.assertAllEqual(iou_output_2.shape, (0, 3)) self.assertAllEqual(iou_output_3.shape, (0, 0)) def test_ioa(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) ioa_1 = box_list_ops.ioa(boxes1, boxes2) ioa_2 = box_list_ops.ioa(boxes2, boxes1) return ioa_1, ioa_2 exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], [1.0 / 12.0, 0.0, 5.0 / 400.0]] exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], [0, 0], [6.0 / 6.0, 5.0 / 5.0]] ioa_output_1, ioa_output_2 = self.execute(graph_fn, []) self.assertAllClose(ioa_output_1, exp_output_1) self.assertAllClose(ioa_output_2, exp_output_2) def test_prune_non_overlapping_boxes(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) minoverlap = 0.5 exp_output_1 = boxes1 exp_output_2 = box_list.BoxList(tf.constant(0.0, shape=[0, 4])) output_1, keep_indices_1 = box_list_ops.prune_non_overlapping_boxes( boxes1, boxes2, min_overlap=minoverlap) output_2, keep_indices_2 = box_list_ops.prune_non_overlapping_boxes( boxes2, boxes1, min_overlap=minoverlap) return (output_1.get(), keep_indices_1, output_2.get(), keep_indices_2, exp_output_1.get(), exp_output_2.get()) (output_1_, keep_indices_1_, output_2_, keep_indices_2_, exp_output_1_, exp_output_2_) = self.execute_cpu(graph_fn, []) self.assertAllClose(output_1_, exp_output_1_) self.assertAllClose(output_2_, exp_output_2_) self.assertAllEqual(keep_indices_1_, [0, 1]) self.assertAllEqual(keep_indices_2_, []) def test_prune_small_boxes(self): def graph_fn(): boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes = box_list.BoxList(boxes) pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) return pruned_boxes.get() exp_boxes = [[3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]] pruned_boxes = self.execute(graph_fn, []) self.assertAllEqual(pruned_boxes, exp_boxes) def test_prune_small_boxes_prunes_boxes_with_negative_side(self): def graph_fn(): boxes = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 
0.0, 20.0, 20.0], [2.0, 3.0, 1.5, 7.0], # negative height [2.0, 3.0, 5.0, 1.7]]) # negative width boxes = box_list.BoxList(boxes) pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3) return pruned_boxes.get() exp_boxes = [[3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]] pruned_boxes = self.execute_cpu(graph_fn, []) self.assertAllEqual(pruned_boxes, exp_boxes) def test_change_coordinate_frame(self): def graph_fn(): corners = tf.constant([[0.25, 0.5, 0.75, 0.75], [0.5, 0.0, 1.0, 1.0]]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) boxes = box_list.BoxList(corners) expected_corners = tf.constant([[0, 0.5, 1.0, 1.0], [0.5, -0.5, 1.5, 1.5]]) expected_boxes = box_list.BoxList(expected_corners) output = box_list_ops.change_coordinate_frame(boxes, window) return output.get(), expected_boxes.get() output_, expected_boxes_ = self.execute(graph_fn, []) self.assertAllClose(output_, expected_boxes_) def test_ioaworks_on_empty_inputs(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) boxes_empty = box_list.BoxList(tf.zeros((0, 4))) ioa_empty_1 = box_list_ops.ioa(boxes1, boxes_empty) ioa_empty_2 = box_list_ops.ioa(boxes_empty, boxes2) ioa_empty_3 = box_list_ops.ioa(boxes_empty, boxes_empty) return ioa_empty_1, ioa_empty_2, ioa_empty_3 ioa_output_1, ioa_output_2, ioa_output_3 = self.execute(graph_fn, []) self.assertAllEqual(ioa_output_1.shape, (2, 0)) self.assertAllEqual(ioa_output_2.shape, (0, 3)) self.assertAllEqual(ioa_output_3.shape, (0, 0)) def test_pairwise_distances(self): def graph_fn(): corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 2.0]]) corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 3.0], [0.0, 0.0, 0.0, 0.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) dist_matrix = box_list_ops.sq_dist(boxes1, boxes2) return dist_matrix exp_output = [[26, 25, 0], [18, 27, 6]] dist_output = self.execute(graph_fn, []) self.assertAllClose(dist_output, exp_output) def test_boolean_mask(self): def graph_fn(): corners = tf.constant( [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) indicator = tf.constant([True, False, True, False, True], tf.bool) boxes = box_list.BoxList(corners) subset = box_list_ops.boolean_mask(boxes, indicator) return subset.get() expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] subset_output = self.execute_cpu(graph_fn, []) self.assertAllClose(subset_output, expected_subset) def test_static_boolean_mask_with_field(self): def graph_fn(corners, weights, indicator): boxes = box_list.BoxList(corners) boxes.add_field('weights', weights) subset = box_list_ops.boolean_mask( boxes, indicator, ['weights'], use_static_shapes=True, indicator_sum=3) return (subset.get_field('boxes'), subset.get_field('weights')) corners = np.array( [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]], dtype=np.float32) indicator = np.array([True, False, True, False, True], dtype=bool) weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) result_boxes, result_weights = self.execute_cpu( graph_fn, [corners, weights, indicator]) expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] expected_weights = [[.1], [.5], [.9]] self.assertAllClose(result_boxes, expected_boxes) self.assertAllClose(result_weights, expected_weights) def test_gather(self): def graph_fn(): corners = tf.constant( [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * 
[4.0]]) indices = tf.constant([0, 2, 4], tf.int32) boxes = box_list.BoxList(corners) subset = box_list_ops.gather(boxes, indices) return subset.get() expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] subset_output = self.execute(graph_fn, []) self.assertAllClose(subset_output, expected_subset) def test_static_gather_with_field(self): def graph_fn(corners, weights, indices): boxes = box_list.BoxList(corners) boxes.add_field('weights', weights) subset = box_list_ops.gather( boxes, indices, ['weights'], use_static_shapes=True) return (subset.get_field('boxes'), subset.get_field('weights')) corners = np.array([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]], dtype=np.float32) weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32) indices = np.array([0, 2, 4], dtype=np.int32) result_boxes, result_weights = self.execute(graph_fn, [corners, weights, indices]) expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]] expected_weights = [[.1], [.5], [.9]] self.assertAllClose(result_boxes, expected_boxes) self.assertAllClose(result_weights, expected_weights) def test_gather_with_invalid_field(self): corners = tf.constant([4 * [0.0], 4 * [1.0]]) indices = tf.constant([0, 1], tf.int32) weights = tf.constant([[.1], [.3]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('weights', weights) with self.assertRaises(ValueError): box_list_ops.gather(boxes, indices, ['foo', 'bar']) def test_gather_with_invalid_inputs(self): corners = tf.constant( [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) indices_float32 = tf.constant([0, 2, 4], tf.float32) boxes = box_list.BoxList(corners) with self.assertRaises(ValueError): _ = box_list_ops.gather(boxes, indices_float32) indices_2d = tf.constant([[0, 2, 4]], tf.int32) boxes = box_list.BoxList(corners) with self.assertRaises(ValueError): _ = box_list_ops.gather(boxes, indices_2d) def test_gather_with_dynamic_indexing(self): def graph_fn(): corners = tf.constant( [4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) weights = tf.constant([.5, .3, .7, .1, .9], tf.float32) indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1]) boxes = box_list.BoxList(corners) boxes.add_field('weights', weights) subset = box_list_ops.gather(boxes, indices, ['weights']) return subset.get(), subset.get_field('weights') expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]] expected_weights = [.5, .7, .9] subset_output, weights_output = self.execute(graph_fn, []) self.assertAllClose(subset_output, expected_subset) self.assertAllClose(weights_output, expected_weights) def test_sort_by_field_ascending_order(self): exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] exp_scores = [.95, .9, .75, .6, .5, .3] exp_weights = [.2, .45, .6, .75, .8, .92] def graph_fn(): shuffle = [2, 4, 0, 5, 1, 3] corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant( [exp_scores[i] for i in shuffle], tf.float32)) boxes.add_field('weights', tf.constant( [exp_weights[i] for i in shuffle], tf.float32)) sort_by_weight = box_list_ops.sort_by_field( boxes, 'weights', order=box_list_ops.SortOrder.ascend) return [sort_by_weight.get(), sort_by_weight.get_field('scores'), sort_by_weight.get_field('weights')] corners_out, scores_out, weights_out = self.execute(graph_fn, []) self.assertAllClose(corners_out, exp_corners) self.assertAllClose(scores_out, exp_scores) self.assertAllClose(weights_out, exp_weights) def 
test_sort_by_field_descending_order(self): exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]] exp_scores = [.95, .9, .75, .6, .5, .3] exp_weights = [.2, .45, .6, .75, .8, .92] def graph_fn(): shuffle = [2, 4, 0, 5, 1, 3] corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant( [exp_scores[i] for i in shuffle], tf.float32)) boxes.add_field('weights', tf.constant( [exp_weights[i] for i in shuffle], tf.float32)) sort_by_score = box_list_ops.sort_by_field(boxes, 'scores') return (sort_by_score.get(), sort_by_score.get_field('scores'), sort_by_score.get_field('weights')) corners_out, scores_out, weights_out = self.execute(graph_fn, []) self.assertAllClose(corners_out, exp_corners) self.assertAllClose(scores_out, exp_scores) self.assertAllClose(weights_out, exp_weights) def test_sort_by_field_invalid_inputs(self): corners = tf.constant([4 * [0.0], 4 * [0.5], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]) misc = tf.constant([[.95, .9], [.5, .3]], tf.float32) weights = tf.constant([[.1, .2]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('misc', misc) boxes.add_field('weights', weights) with self.assertRaises(ValueError): box_list_ops.sort_by_field(boxes, 'area') with self.assertRaises(ValueError): box_list_ops.sort_by_field(boxes, 'misc') with self.assertRaises(ValueError): box_list_ops.sort_by_field(boxes, 'weights') def test_visualize_boxes_in_image(self): def graph_fn(): image = tf.zeros((6, 4, 3)) corners = tf.constant([[0, 0, 5, 3], [0, 0, 3, 2]], tf.float32) boxes = box_list.BoxList(corners) image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes) image_and_boxes_bw = tf.cast( tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0), dtype=tf.float32) return image_and_boxes_bw exp_result = [[1, 1, 1, 0], [1, 1, 1, 0], [1, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 0], [0, 0, 0, 0]] output = self.execute_cpu(graph_fn, []) self.assertAllEqual(output.astype(int), exp_result) def test_filter_field_value_equals(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('classes', tf.constant([1, 2, 1, 2, 2, 1])) filtered_boxes1 = box_list_ops.filter_field_value_equals( boxes, 'classes', 1) filtered_boxes2 = box_list_ops.filter_field_value_equals( boxes, 'classes', 2) return filtered_boxes1.get(), filtered_boxes2.get() exp_output1 = [[0, 0, 1, 1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] exp_output2 = [[0, 0.1, 1, 1.1], [0, 10, 1, 11], [0, 10.1, 1, 11.1]] filtered_output1, filtered_output2 = self.execute_cpu(graph_fn, []) self.assertAllClose(filtered_output1, exp_output1) self.assertAllClose(filtered_output2, exp_output2) def test_filter_greater_than(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant([.1, .75, .9, .5, .5, .8])) thresh = .6 filtered_boxes = box_list_ops.filter_greater_than(boxes, thresh) return filtered_boxes.get() exp_output = [[0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]] filtered_output = self.execute_cpu(graph_fn, []) self.assertAllClose(filtered_output, exp_output) def test_clip_box_list(self): def graph_fn(): boxlist = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], 
[0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) boxlist.add_field('classes', tf.constant([0, 0, 1, 1])) boxlist.add_field('scores', tf.constant([0.75, 0.65, 0.3, 0.2])) num_boxes = 2 clipped_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) return (clipped_boxlist.get(), clipped_boxlist.get_field('classes'), clipped_boxlist.get_field('scores')) expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]] expected_classes = [0, 0] expected_scores = [0.75, 0.65] boxes_out, classes_out, scores_out = self.execute(graph_fn, []) self.assertAllClose(expected_boxes, boxes_out) self.assertAllEqual(expected_classes, classes_out) self.assertAllClose(expected_scores, scores_out) def test_pad_box_list(self): def graph_fn(): boxlist = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32)) boxlist.add_field('classes', tf.constant([0, 1])) boxlist.add_field('scores', tf.constant([0.75, 0.2])) num_boxes = 4 padded_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes) return (padded_boxlist.get(), padded_boxlist.get_field('classes'), padded_boxlist.get_field('scores')) expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] expected_classes = [0, 1, 0, 0] expected_scores = [0.75, 0.2, 0, 0] boxes_out, classes_out, scores_out = self.execute(graph_fn, []) self.assertAllClose(expected_boxes, boxes_out) self.assertAllEqual(expected_classes, classes_out) self.assertAllClose(expected_scores, scores_out) def test_select_random_box(self): boxes = [[0., 0., 1., 1.], [0., 1., 2., 3.], [0., 2., 3., 4.]] def graph_fn(): corners = tf.constant(boxes, dtype=tf.float32) boxlist = box_list.BoxList(corners) random_bbox, valid = box_list_ops.select_random_box(boxlist) return random_bbox, valid random_bbox_out, valid_out = self.execute(graph_fn, []) norm_small = any( [np.linalg.norm(random_bbox_out - box) < 1e-6 for box in boxes]) self.assertTrue(norm_small) self.assertTrue(valid_out) def test_select_random_box_with_empty_boxlist(self): def graph_fn(): corners = tf.constant([], shape=[0, 4], dtype=tf.float32) boxlist = box_list.BoxList(corners) random_bbox, valid = box_list_ops.select_random_box(boxlist) return random_bbox, valid random_bbox_out, valid_out = self.execute_cpu(graph_fn, []) expected_bbox_out = np.array([[-1., -1., -1., -1.]], dtype=np.float32) self.assertAllEqual(expected_bbox_out, random_bbox_out) self.assertFalse(valid_out) def test_get_minimal_coverage_box(self): def graph_fn(): boxes = [[0., 0., 1., 1.], [-1., 1., 2., 3.], [0., 2., 3., 4.]] corners = tf.constant(boxes, dtype=tf.float32) boxlist = box_list.BoxList(corners) coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) return coverage_box coverage_box_out = self.execute(graph_fn, []) expected_coverage_box = [[-1., 0., 3., 4.]] self.assertAllClose(expected_coverage_box, coverage_box_out) def test_get_minimal_coverage_box_with_empty_boxlist(self): def graph_fn(): corners = tf.constant([], shape=[0, 4], dtype=tf.float32) boxlist = box_list.BoxList(corners) coverage_box = box_list_ops.get_minimal_coverage_box(boxlist) return coverage_box coverage_box_out = self.execute(graph_fn, []) self.assertAllClose([[0.0, 0.0, 1.0, 1.0]], coverage_box_out) class ConcatenateTest(test_case.TestCase): def test_invalid_input_box_list_list(self): with self.assertRaises(ValueError): box_list_ops.concatenate(None) with self.assertRaises(ValueError): box_list_ops.concatenate([]) with self.assertRaises(ValueError): corners = tf.constant([[0, 0, 0, 
0]], tf.float32) boxlist = box_list.BoxList(corners) box_list_ops.concatenate([boxlist, 2]) def test_concatenate_with_missing_fields(self): corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) scores1 = tf.constant([1.0, 2.1]) corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) boxlist1 = box_list.BoxList(corners1) boxlist1.add_field('scores', scores1) boxlist2 = box_list.BoxList(corners2) with self.assertRaises(ValueError): box_list_ops.concatenate([boxlist1, boxlist2]) def test_concatenate_with_incompatible_field_shapes(self): corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) scores1 = tf.constant([1.0, 2.1]) corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32) scores2 = tf.constant([[1.0, 1.0], [2.1, 3.2]]) boxlist1 = box_list.BoxList(corners1) boxlist1.add_field('scores', scores1) boxlist2 = box_list.BoxList(corners2) boxlist2.add_field('scores', scores2) with self.assertRaises(ValueError): box_list_ops.concatenate([boxlist1, boxlist2]) def test_concatenate_is_correct(self): def graph_fn(): corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32) scores1 = tf.constant([1.0, 2.1]) corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]], tf.float32) scores2 = tf.constant([1.0, 2.1, 5.6]) boxlist1 = box_list.BoxList(corners1) boxlist1.add_field('scores', scores1) boxlist2 = box_list.BoxList(corners2) boxlist2.add_field('scores', scores2) result = box_list_ops.concatenate([boxlist1, boxlist2]) return result.get(), result.get_field('scores') exp_corners = [[0, 0, 0, 0], [1, 2, 3, 4], [0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]] exp_scores = [1.0, 2.1, 1.0, 2.1, 5.6] corners_output, scores_output = self.execute(graph_fn, []) self.assertAllClose(corners_output, exp_corners) self.assertAllClose(scores_output, exp_scores) class NonMaxSuppressionTest(test_case.TestCase): def test_select_from_three_clusters(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) iou_thresh = .5 max_output_size = 3 nms = box_list_ops.non_max_suppression( boxes, iou_thresh, max_output_size) return nms.get() exp_nms = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]] nms_output = self.execute_cpu(graph_fn, []) self.assertAllClose(nms_output, exp_nms) def test_select_at_most_two_boxes_from_three_clusters(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) iou_thresh = .5 max_output_size = 2 nms = box_list_ops.non_max_suppression( boxes, iou_thresh, max_output_size) return nms.get() exp_nms = [[0, 10, 1, 11], [0, 0, 1, 1]] nms_output = self.execute_cpu(graph_fn, []) self.assertAllClose(nms_output, exp_nms) def test_select_at_most_thirty_boxes_from_three_clusters(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3])) iou_thresh = .5 max_output_size = 30 nms = box_list_ops.non_max_suppression( boxes, iou_thresh, max_output_size) return nms.get() exp_nms = [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]] nms_output = 
self.execute_cpu(graph_fn, []) self.assertAllClose(nms_output, exp_nms) def test_select_single_box(self): def graph_fn(): corners = tf.constant([[0, 0, 1, 1]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant([.9])) iou_thresh = .5 max_output_size = 3 nms = box_list_ops.non_max_suppression( boxes, iou_thresh, max_output_size) return nms.get() exp_nms = [[0, 0, 1, 1]] nms_output = self.execute_cpu(graph_fn, []) self.assertAllClose(nms_output, exp_nms) def test_select_from_ten_identical_boxes(self): def graph_fn(): corners = tf.constant(10 * [[0, 0, 1, 1]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('scores', tf.constant(10 * [.9])) iou_thresh = .5 max_output_size = 3 nms = box_list_ops.non_max_suppression( boxes, iou_thresh, max_output_size) return nms.get() exp_nms = [[0, 0, 1, 1]] nms_output = self.execute_cpu(graph_fn, []) self.assertAllClose(nms_output, exp_nms) def test_copy_extra_fields(self): tensor1 = np.array([[1], [4]]) tensor2 = np.array([[1, 1], [2, 2]]) def graph_fn(): corners = tf.constant([[0, 0, 1, 1], [0, 0.1, 1, 1.1]], tf.float32) boxes = box_list.BoxList(corners) boxes.add_field('tensor1', tf.constant(tensor1)) boxes.add_field('tensor2', tf.constant(tensor2)) new_boxes = box_list.BoxList(tf.constant([[0, 0, 10, 10], [1, 3, 5, 5]], tf.float32)) new_boxes = box_list_ops._copy_extra_fields(new_boxes, boxes) return new_boxes.get_field('tensor1'), new_boxes.get_field('tensor2') tensor1_out, tensor2_out = self.execute_cpu(graph_fn, []) self.assertAllClose(tensor1, tensor1_out) self.assertAllClose(tensor2, tensor2_out) class CoordinatesConversionTest(test_case.TestCase): def test_to_normalized_coordinates(self): def graph_fn(): coordinates = tf.constant([[0, 0, 100, 100], [25, 25, 75, 75]], tf.float32) img = tf.ones((128, 100, 100, 3)) boxlist = box_list.BoxList(coordinates) normalized_boxlist = box_list_ops.to_normalized_coordinates( boxlist, tf.shape(img)[1], tf.shape(img)[2]) return normalized_boxlist.get() expected_boxes = [[0, 0, 1, 1], [0.25, 0.25, 0.75, 0.75]] normalized_boxes = self.execute(graph_fn, []) self.assertAllClose(normalized_boxes, expected_boxes) def test_to_normalized_coordinates_already_normalized(self): def graph_fn(): coordinates = tf.constant([[0, 0, 1, 1], [0.25, 0.25, 0.75, 0.75]], tf.float32) img = tf.ones((128, 100, 100, 3)) boxlist = box_list.BoxList(coordinates) normalized_boxlist = box_list_ops.to_normalized_coordinates( boxlist, tf.shape(img)[1], tf.shape(img)[2]) return normalized_boxlist.get() with self.assertRaisesOpError('assertion failed'): self.execute_cpu(graph_fn, []) def test_to_absolute_coordinates(self): def graph_fn(): coordinates = tf.constant([[0, 0, 1, 1], [0.25, 0.25, 0.75, 0.75]], tf.float32) img = tf.ones((128, 100, 100, 3)) boxlist = box_list.BoxList(coordinates) absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) return absolute_boxlist.get() expected_boxes = [[0, 0, 100, 100], [25, 25, 75, 75]] absolute_boxes = self.execute(graph_fn, []) self.assertAllClose(absolute_boxes, expected_boxes) def test_to_absolute_coordinates_already_abolute(self): def graph_fn(): coordinates = tf.constant([[0, 0, 100, 100], [25, 25, 75, 75]], tf.float32) img = tf.ones((128, 100, 100, 3)) boxlist = box_list.BoxList(coordinates) absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) return absolute_boxlist.get() with self.assertRaisesOpError('assertion failed'): self.execute_cpu(graph_fn, []) def 
test_convert_to_normalized_and_back(self): coordinates = np.random.uniform(size=(100, 4)) coordinates = np.round(np.sort(coordinates) * 200) coordinates[:, 2:4] += 1 coordinates[99, :] = [0, 0, 201, 201] def graph_fn(): img = tf.ones((128, 202, 202, 3)) boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) boxlist = box_list_ops.to_normalized_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) boxlist = box_list_ops.to_absolute_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) return boxlist.get() out = self.execute(graph_fn, []) self.assertAllClose(out, coordinates) def test_convert_to_absolute_and_back(self): coordinates = np.random.uniform(size=(100, 4)) coordinates = np.sort(coordinates) coordinates[99, :] = [0, 0, 1, 1] def graph_fn(): img = tf.ones((128, 202, 202, 3)) boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32)) boxlist = box_list_ops.to_absolute_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) boxlist = box_list_ops.to_normalized_coordinates(boxlist, tf.shape(img)[1], tf.shape(img)[2]) return boxlist.get() out = self.execute(graph_fn, []) self.assertAllClose(out, coordinates) def test_to_absolute_coordinates_maximum_coordinate_check(self): def graph_fn(): coordinates = tf.constant([[0, 0, 1.2, 1.2], [0.25, 0.25, 0.75, 0.75]], tf.float32) img = tf.ones((128, 100, 100, 3)) boxlist = box_list.BoxList(coordinates) absolute_boxlist = box_list_ops.to_absolute_coordinates( boxlist, tf.shape(img)[1], tf.shape(img)[2], maximum_normalized_coordinate=1.1) return absolute_boxlist.get() with self.assertRaisesOpError('assertion failed'): self.execute_cpu(graph_fn, []) class BoxRefinementTest(test_case.TestCase): def test_box_voting(self): def graph_fn(): candidates = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.8, 0.8]], tf.float32)) candidates.add_field('ExtraField', tf.constant([1, 2])) pool = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.8, 0.8]], tf.float32)) pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) averaged_boxes = box_list_ops.box_voting(candidates, pool) return (averaged_boxes.get(), averaged_boxes.get_field('scores'), averaged_boxes.get_field('ExtraField')) expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] expected_scores = [0.5, 0.3] boxes_out, scores_out, extra_field_out = self.execute(graph_fn, []) self.assertAllClose(expected_boxes, boxes_out) self.assertAllClose(expected_scores, scores_out) self.assertAllEqual(extra_field_out, [1, 2]) def test_box_voting_fails_with_negative_scores(self): def graph_fn(): candidates = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) pool = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) pool.add_field('scores', tf.constant([-0.2])) averaged_boxes = box_list_ops.box_voting(candidates, pool) return averaged_boxes.get() with self.assertRaisesOpError('Scores must be non negative'): self.execute_cpu(graph_fn, []) def test_box_voting_fails_when_unmatched(self): def graph_fn(): candidates = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32)) pool = box_list.BoxList(tf.constant([[0.6, 0.6, 0.8, 0.8]], tf.float32)) pool.add_field('scores', tf.constant([0.2])) averaged_boxes = box_list_ops.box_voting(candidates, pool) return averaged_boxes.get() with self.assertRaisesOpError('Each box in selected_boxes must match ' 'with at least one box in pool_boxes.'): self.execute_cpu(graph_fn, []) def test_refine_boxes(self): def graph_fn(): pool = box_list.BoxList( 
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.8, 0.8]], tf.float32)) pool.add_field('ExtraField', tf.constant([1, 2, 3])) pool.add_field('scores', tf.constant([0.75, 0.25, 0.3])) averaged_boxes = box_list_ops.refine_boxes(pool, 0.5, 10) return (averaged_boxes.get(), averaged_boxes.get_field('scores'), averaged_boxes.get_field('ExtraField')) boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]] expected_scores = [0.5, 0.3] self.assertAllClose(expected_boxes, boxes_out) self.assertAllClose(expected_scores, scores_out) self.assertAllEqual(extra_field_out, [1, 3]) def test_refine_boxes_multi_class(self): def graph_fn(): pool = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) pool.add_field('classes', tf.constant([0, 0, 1, 1])) pool.add_field('scores', tf.constant([0.75, 0.25, 0.3, 0.2])) averaged_boxes = box_list_ops.refine_boxes_multi_class(pool, 3, 0.5, 10) return (averaged_boxes.get(), averaged_boxes.get_field('scores'), averaged_boxes.get_field('classes')) boxes_out, scores_out, extra_field_out = self.execute_cpu(graph_fn, []) expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]] expected_scores = [0.5, 0.3, 0.2] self.assertAllClose(expected_boxes, boxes_out) self.assertAllClose(expected_scores, scores_out) self.assertAllEqual(extra_field_out, [0, 1, 1]) def test_sample_boxes_by_jittering(self): def graph_fn(): boxes = box_list.BoxList( tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32)) sampled_boxes = box_list_ops.sample_boxes_by_jittering( boxlist=boxes, num_boxes_to_sample=10) iou = box_list_ops.iou(boxes, sampled_boxes) iou_max = tf.reduce_max(iou, axis=0) return sampled_boxes.get(), iou_max np_sampled_boxes, np_iou_max = self.execute(graph_fn, []) self.assertAllEqual(np_sampled_boxes.shape, [10, 4]) self.assertAllGreater(np_iou_max, 0.3) if __name__ == '__main__': tf.test.main()
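The IoU expectations in test_iou above (2/16, 6/400, and so on) follow directly from intersection area over union area for boxes given in [ymin, xmin, ymax, xmax] order. As a sanity check, the following standalone NumPy sketch (independent of box_list_ops; the function name is illustrative) reproduces those pairwise values for the same corners.

import numpy as np

def pairwise_iou(boxes1, boxes2):
    # Boxes are [ymin, xmin, ymax, xmax]; result is an [N, M] IoU matrix.
    ymin1, xmin1, ymax1, xmax1 = np.split(boxes1, 4, axis=1)
    ymin2, xmin2, ymax2, xmax2 = np.split(boxes2, 4, axis=1)
    inter_h = np.maximum(0.0, np.minimum(ymax1, ymax2.T) - np.maximum(ymin1, ymin2.T))
    inter_w = np.maximum(0.0, np.minimum(xmax1, xmax2.T) - np.maximum(xmin1, xmin2.T))
    intersection = inter_h * inter_w
    area1 = (ymax1 - ymin1) * (xmax1 - xmin1)
    area2 = (ymax2 - ymin2) * (xmax2 - xmin2)
    return intersection / (area1 + area2.T - intersection)

boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0],
                   [14.0, 14.0, 15.0, 15.0],
                   [0.0, 0.0, 20.0, 20.0]])
print(pairwise_iou(boxes1, boxes2))
# [[2/16, 0, 6/400], [1/16, 0, 5/400]], matching exp_output in test_iou.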
48,595
42.978281
80
py
models
models-master/research/object_detection/core/balanced_positive_negative_sampler.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired batch_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired batch_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. """ import tensorflow.compat.v1 as tf from object_detection.core import minibatch_sampler class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): """Subsamples minibatches to a desired balance of positives and negatives.""" def __init__(self, positive_fraction=0.5, is_static=False): """Constructs a minibatch sampler. Args: positive_fraction: desired fraction of positive examples (scalar in [0,1]) in the batch. is_static: If True, uses an implementation with static shape guarantees. Raises: ValueError: if positive_fraction < 0, or positive_fraction > 1 """ if positive_fraction < 0 or positive_fraction > 1: raise ValueError('positive_fraction should be in range [0,1]. ' 'Received: %s.' % positive_fraction) self._positive_fraction = positive_fraction self._is_static = is_static def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): """Counts the number of positives and negatives numbers to be sampled. Args: sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains the signed indices of the examples where the sign is based on the label value. The examples that cannot be sampled are set to 0. It samples atmost sample_size*positive_fraction positive examples and remaining from negative examples. sample_size: Size of subsamples. Returns: A tuple containing the number of positive and negative labels in the subsample. 
""" input_length = tf.shape(sorted_indices_tensor)[0] valid_positive_index = tf.greater(sorted_indices_tensor, tf.zeros(input_length, tf.int32)) num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32)) max_num_positive_samples = tf.constant( int(sample_size * self._positive_fraction), tf.int32) num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) num_negative_samples = tf.constant(sample_size, tf.int32) - num_positive_samples return num_positive_samples, num_negative_samples def _get_values_from_start_and_end(self, input_tensor, num_start_samples, num_end_samples, total_num_samples): """slices num_start_samples and last num_end_samples from input_tensor. Args: input_tensor: An int32 tensor of shape [N] to be sliced. num_start_samples: Number of examples to be sliced from the beginning of the input tensor. num_end_samples: Number of examples to be sliced from the end of the input tensor. total_num_samples: Sum of is num_start_samples and num_end_samples. This should be a scalar. Returns: A tensor containing the first num_start_samples and last num_end_samples from input_tensor. """ input_length = tf.shape(input_tensor)[0] start_positions = tf.less(tf.range(input_length), num_start_samples) end_positions = tf.greater_equal( tf.range(input_length), input_length - num_end_samples) selected_positions = tf.logical_or(start_positions, end_positions) selected_positions = tf.cast(selected_positions, tf.float32) indexed_positions = tf.multiply(tf.cumsum(selected_positions), selected_positions) one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, total_num_samples, dtype=tf.float32) return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), one_hot_selector, axes=[0, 0]), tf.int32) def _static_subsample(self, indicator, batch_size, labels): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. N should be a complie time constant. batch_size: desired batch size. This scalar cannot be None. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. N should be a complie time constant. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. It ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ # Check if indicator and labels have a static size. if not indicator.shape.is_fully_defined(): raise ValueError('indicator must be static in shape when is_static is' 'True') if not labels.shape.is_fully_defined(): raise ValueError('labels must be static in shape when is_static is' 'True') if not isinstance(batch_size, int): raise ValueError('batch_size has to be an integer when is_static is' 'True.') input_length = tf.shape(indicator)[0] # Set the number of examples set True in indicator to be at least # batch_size. num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32)) additional_false_sample = tf.less_equal( tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), batch_size - num_true_sampled) indicator = tf.logical_or(indicator, additional_false_sample) # Shuffle indicator and label. Need to store the permutation to restore the # order post sampling. 
permutation = tf.random_shuffle(tf.range(input_length)) indicator = tf.gather(indicator, permutation, axis=0) labels = tf.gather(labels, permutation, axis=0) # index (starting from 1) when indicator is True, 0 when False indicator_idx = tf.where( indicator, tf.range(1, input_length + 1), tf.zeros(input_length, tf.int32)) # Replace -1 for negative, +1 for positive labels signed_label = tf.where( labels, tf.ones(input_length, tf.int32), tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) # negative of index for negative label, positive index for positive label, # 0 when indicator is False. signed_indicator_idx = tf.multiply(indicator_idx, signed_label) sorted_signed_indicator_idx = tf.nn.top_k( signed_indicator_idx, input_length, sorted=True).values [num_positive_samples, num_negative_samples] = self._get_num_pos_neg_samples( sorted_signed_indicator_idx, batch_size) sampled_idx = self._get_values_from_start_and_end( sorted_signed_indicator_idx, num_positive_samples, num_negative_samples, batch_size) # Shift the indices to start from 0 and remove any samples that are set as # False. sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) sampled_idx = tf.multiply( tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), sampled_idx) sampled_idx_indicator = tf.cast(tf.reduce_sum( tf.one_hot(sampled_idx, depth=input_length), axis=0), tf.bool) # project back the order based on stored permutations idx_indicator = tf.scatter_nd( tf.expand_dims(permutation, -1), sampled_idx_indicator, shape=(input_length,)) return idx_indicator def subsample(self, indicator, batch_size, labels, scope=None): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches self._positive_fraction. It cannot be None is is_static is True. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. scope: name scope. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ if len(indicator.get_shape().as_list()) != 1: raise ValueError('indicator must be 1 dimensional, got a tensor of ' 'shape %s' % indicator.get_shape()) if len(labels.get_shape().as_list()) != 1: raise ValueError('labels must be 1 dimensional, got a tensor of ' 'shape %s' % labels.get_shape()) if labels.dtype != tf.bool: raise ValueError('labels should be of type bool. Received: %s' % labels.dtype) if indicator.dtype != tf.bool: raise ValueError('indicator should be of type bool. 
Received: %s' % indicator.dtype) with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'): if self._is_static: return self._static_subsample(indicator, batch_size, labels) else: # Only sample from indicated samples negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if batch_size is None: max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) else: max_num_pos = int(self._positive_fraction * batch_size) sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) if batch_size is None: negative_positive_ratio = ( 1 - self._positive_fraction) / self._positive_fraction max_num_neg = tf.cast( negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32) else: max_num_neg = batch_size - num_sampled_pos sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
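To make the sampling policy implemented above concrete: in the non-static path the sampler keeps at most positive_fraction * batch_size of the indicated positives and fills the remaining budget with indicated negatives, returning fewer examples if there are not enough negatives. The following NumPy sketch (not the class itself; the function name and the random-permutation choice are illustrative) captures that policy.

import numpy as np

def balanced_subsample(indicator, labels, batch_size, positive_fraction=0.5, seed=0):
    # indicator: bool [N], True where an example may be sampled.
    # labels:    bool [N], True for positives, False for negatives.
    rng = np.random.default_rng(seed)
    positives = np.flatnonzero(indicator & labels)
    negatives = np.flatnonzero(indicator & ~labels)
    max_pos = int(positive_fraction * batch_size)
    pos_idx = rng.permutation(positives)[:max_pos]
    # Fill the rest of the budget with negatives; if there are too few, the
    # returned batch is simply smaller than batch_size.
    neg_idx = rng.permutation(negatives)[:batch_size - len(pos_idx)]
    mask = np.zeros(len(indicator), dtype=bool)
    mask[pos_idx] = True
    mask[neg_idx] = True
    return mask

indicator = np.array([True, True, True, True, True, False])
labels = np.array([True, False, False, True, False, True])
mask = balanced_subsample(indicator, labels, batch_size=4)
print(int(mask.sum()), int((mask & labels).sum()))  # 4 sampled, 2 of them positive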
11,632
43.231939
80
py
models
models-master/research/object_detection/core/region_similarity_calculator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Region Similarity Calculators for BoxLists. Region Similarity Calculators compare a pairwise measure of similarity between the boxes in two BoxLists. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from abc import abstractmethod import six import tensorflow.compat.v1 as tf from object_detection.core import box_list_ops from object_detection.core import standard_fields as fields class RegionSimilarityCalculator(six.with_metaclass(ABCMeta, object)): """Abstract base class for region similarity calculator.""" def compare(self, boxlist1, boxlist2, scope=None): """Computes matrix of pairwise similarity between BoxLists. This op (to be overridden) computes a measure of pairwise similarity between the boxes in the given BoxLists. Higher values indicate more similarity. Note that this method simply measures similarity and does not explicitly perform a matching. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. scope: Op scope name. Defaults to 'Compare' if None. Returns: a (float32) tensor of shape [N, M] with pairwise similarity score. """ with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope: return self._compare(boxlist1, boxlist2) @abstractmethod def _compare(self, boxlist1, boxlist2): pass class IouSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on Intersection over Union (IOU) metric. This class computes pairwise similarity between two BoxLists based on IOU. """ def _compare(self, boxlist1, boxlist2): """Compute pairwise IOU similarity between the two BoxLists. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing pairwise iou scores. """ return box_list_ops.iou(boxlist1, boxlist2) class DETRSimilarity(RegionSimilarityCalculator): """Class to compute similarity for the Detection Transformer model. This class computes pairwise DETR similarity between two BoxLists using a weighted combination of GIOU, classification scores, and the L1 loss. """ def __init__(self, l1_weight=5, giou_weight=2): super().__init__() self.l1_weight = l1_weight self.giou_weight = giou_weight def _compare(self, boxlist1, boxlist2): """Compute pairwise DETR similarity between the two BoxLists. Args: boxlist1: BoxList holding N groundtruth boxes. boxlist2: BoxList holding M predicted boxes. Returns: A tensor with shape [N, M] representing pairwise DETR similarity scores. 
""" groundtruth_labels = boxlist1.get_field(fields.BoxListFields.classes) predicted_labels = boxlist2.get_field(fields.BoxListFields.classes) classification_scores = tf.matmul(groundtruth_labels, predicted_labels, transpose_b=True) loss = self.l1_weight * box_list_ops.l1( boxlist1, boxlist2) + self.giou_weight * (1 - box_list_ops.giou( boxlist1, boxlist2)) - classification_scores return -loss class NegSqDistSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on the squared distance metric. This class computes pairwise similarity between two BoxLists based on the negative squared distance metric. """ def _compare(self, boxlist1, boxlist2): """Compute matrix of (negated) sq distances. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing negated pairwise squared distance. """ return -1 * box_list_ops.sq_dist(boxlist1, boxlist2) class IoaSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on Intersection over Area (IOA) metric. This class computes pairwise similarity between two BoxLists based on their pairwise intersections divided by the areas of second BoxLists. """ def _compare(self, boxlist1, boxlist2): """Compute pairwise IOA similarity between the two BoxLists. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing pairwise IOA scores. """ return box_list_ops.ioa(boxlist1, boxlist2) class ThresholdedIouSimilarity(RegionSimilarityCalculator): """Class to compute similarity based on thresholded IOU and score. This class computes pairwise similarity between two BoxLists based on IOU and a 'score' present in boxlist1. If IOU > threshold, then the entry in the output pairwise tensor will contain `score`, otherwise 0. """ def __init__(self, iou_threshold=0): """Initialize the ThresholdedIouSimilarity. Args: iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold, then the comparison result will be the foreground probability of the first box, otherwise it will be zero. """ super(ThresholdedIouSimilarity, self).__init__() self._iou_threshold = iou_threshold def _compare(self, boxlist1, boxlist2): """Compute pairwise IOU similarity between the two BoxLists and score. Args: boxlist1: BoxList holding N boxes. Must have a score field. boxlist2: BoxList holding M boxes. Returns: A tensor with shape [N, M] representing scores threholded by pairwise iou scores. """ ious = box_list_ops.iou(boxlist1, boxlist2) scores = boxlist1.get_field(fields.BoxListFields.scores) scores = tf.expand_dims(scores, axis=1) row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]]) thresholded_ious = tf.where(ious > self._iou_threshold, row_replicated_scores, tf.zeros_like(ious)) return thresholded_ious
6,626
33.336788
80
py
models
models-master/research/object_detection/core/matcher_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.matcher.""" import numpy as np import tensorflow.compat.v1 as tf from object_detection.core import matcher from object_detection.utils import test_case class MatchTest(test_case.TestCase): def test_get_correct_matched_columnIndices(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) matched_column_indices = match.matched_column_indices() return matched_column_indices expected_column_indices = [0, 1, 3, 5] matched_column_indices = self.execute(graph_fn, []) self.assertAllEqual(matched_column_indices, expected_column_indices) def test_get_correct_counts(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 1, -2]) match = matcher.Match(match_results) num_matched_columns = match.num_matched_columns() num_unmatched_columns = match.num_unmatched_columns() num_ignored_columns = match.num_ignored_columns() num_matched_rows = match.num_matched_rows() return [num_matched_columns, num_unmatched_columns, num_ignored_columns, num_matched_rows] (num_matched_columns_out, num_unmatched_columns_out, num_ignored_columns_out, num_matched_rows_out) = self.execute_cpu(graph_fn, []) exp_num_matched_columns = 4 exp_num_unmatched_columns = 2 exp_num_ignored_columns = 1 exp_num_matched_rows = 3 self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns) self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns) self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns) self.assertAllEqual(num_matched_rows_out, exp_num_matched_rows) def testGetCorrectUnmatchedColumnIndices(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) unmatched_column_indices = match.unmatched_column_indices() return unmatched_column_indices unmatched_column_indices = self.execute(graph_fn, []) expected_column_indices = [2, 4] self.assertAllEqual(unmatched_column_indices, expected_column_indices) def testGetCorrectMatchedRowIndices(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) matched_row_indices = match.matched_row_indices() return matched_row_indices matched_row_indices = self.execute(graph_fn, []) expected_row_indices = [3, 1, 0, 5] self.assertAllEqual(matched_row_indices, expected_row_indices) def test_get_correct_ignored_column_indices(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) ignored_column_indices = match.ignored_column_indices() return ignored_column_indices ignored_column_indices = self.execute(graph_fn, []) expected_column_indices = [6] self.assertAllEqual(ignored_column_indices, expected_column_indices) def test_get_correct_matched_column_indicator(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 
5, -2]) match = matcher.Match(match_results) matched_column_indicator = match.matched_column_indicator() return matched_column_indicator expected_column_indicator = [True, True, False, True, False, True, False] matched_column_indicator = self.execute(graph_fn, []) self.assertAllEqual(matched_column_indicator, expected_column_indicator) def test_get_correct_unmatched_column_indicator(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) unmatched_column_indicator = match.unmatched_column_indicator() return unmatched_column_indicator expected_column_indicator = [False, False, True, False, True, False, False] unmatched_column_indicator = self.execute(graph_fn, []) self.assertAllEqual(unmatched_column_indicator, expected_column_indicator) def test_get_correct_ignored_column_indicator(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) ignored_column_indicator = match.ignored_column_indicator() return ignored_column_indicator expected_column_indicator = [False, False, False, False, False, False, True] ignored_column_indicator = self.execute(graph_fn, []) self.assertAllEqual(ignored_column_indicator, expected_column_indicator) def test_get_correct_unmatched_ignored_column_indices(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) unmatched_ignored_column_indices = (match. unmatched_or_ignored_column_indices()) return unmatched_ignored_column_indices expected_column_indices = [2, 4, 6] unmatched_ignored_column_indices = self.execute(graph_fn, []) self.assertAllEqual(unmatched_ignored_column_indices, expected_column_indices) def test_all_columns_accounted_for(self): # Note: deliberately setting to small number so not always # all possibilities appear (matched, unmatched, ignored) def graph_fn(): match_results = tf.random_uniform( [num_matches], minval=-2, maxval=5, dtype=tf.int32) match = matcher.Match(match_results) matched_column_indices = match.matched_column_indices() unmatched_column_indices = match.unmatched_column_indices() ignored_column_indices = match.ignored_column_indices() return (matched_column_indices, unmatched_column_indices, ignored_column_indices) num_matches = 10 matched, unmatched, ignored = self.execute(graph_fn, []) all_indices = np.hstack((matched, unmatched, ignored)) all_indices_sorted = np.sort(all_indices) self.assertAllEqual(all_indices_sorted, np.arange(num_matches, dtype=np.int32)) def test_scalar_gather_based_on_match(self): def graph_fn(): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32) match = matcher.Match(match_results) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=100., ignored_value=200.) 
return gathered_tensor expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200] gathered_tensor_out = self.execute(graph_fn, []) self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) def test_multidimensional_gather_based_on_match(self): def graph_fn(): match_results = tf.constant([1, -1, -2]) input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], dtype=tf.float32) match = matcher.Match(match_results) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) return gathered_tensor expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] gathered_tensor_out = self.execute(graph_fn, []) self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self): def graph_fn(): match_results = tf.constant([1, -1, -2]) input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], dtype=tf.float32) match = matcher.Match(match_results, use_matmul_gather=True) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) return gathered_tensor expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] gathered_tensor_out = self.execute(graph_fn, []) self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) if __name__ == '__main__': tf.test.main()
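As the expectations in these tests indicate, match_results holds one entry per column: a value >= 0 is the index of the matched row, -1 marks an unmatched column, and -2 an ignored one. A minimal NumPy sketch (independent of the Match class) recovers the same index sets used in the expectations above.

import numpy as np

match_results = np.array([3, 1, -1, 0, -1, 5, -2])

matched_columns = np.flatnonzero(match_results >= 0)     # [0, 1, 3, 5]
unmatched_columns = np.flatnonzero(match_results == -1)  # [2, 4]
ignored_columns = np.flatnonzero(match_results == -2)    # [6]
matched_rows = match_results[matched_columns]            # [3, 1, 0, 5]

print(matched_columns, unmatched_columns, ignored_columns, matched_rows)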
8,989
45.822917
80
py
models
models-master/research/object_detection/core/__init__.py
1
0
0
py
models
models-master/research/object_detection/core/batch_multiclass_nms_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for google3.third_party.tensorflow_models.object_detection.core.batch_multiclass_nms.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from six.moves import range import tensorflow.compat.v1 as tf from object_detection.core import post_processing from object_detection.utils import test_case class BatchMulticlassNonMaxSuppressionTest(test_case.TestCase, parameterized.TestCase): def test_batch_multiclass_nms_with_batch_size_1(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]], [[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0], [.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[[0, 10, 1, 11], [0, 0, 1, 1], [0, 999, 2, 1004], [0, 100, 1, 101]]] exp_nms_scores = [[.95, .9, .85, .3]] exp_nms_classes = [[0, 0, 1, 0]] def graph_fn(boxes, scores): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertEqual(num_detections, [4]) def test_batch_iou_with_negative_data(self): def graph_fn(): boxes = tf.constant([[[0, -0.01, 0.1, 1.1], [0, 0.2, 0.2, 5.0], [0, -0.01, 0.1, 1.], [-1, -1, -1, -1]]], tf.float32) iou = post_processing.batch_iou(boxes, boxes) return iou iou = self.execute_cpu(graph_fn, []) expected_iou = [[[0.99999994, 0.0917431, 0.9099099, -1.], [0.0917431, 1., 0.08154944, -1.], [0.9099099, 0.08154944, 1., -1.], [-1., -1., -1., -1.]]] self.assertAllClose(iou, expected_iou) @parameterized.parameters(False, True) def test_batch_multiclass_nms_with_batch_size_2(self, use_dynamic_map_fn): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 
1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 999, 2, 1004], [0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.95, .9, 0, 0], [.85, .5, .3, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [1, 0, 0, 0]]) def graph_fn(boxes, scores): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, use_dynamic_map_fn=use_dynamic_map_fn) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) # Check static shapes self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) self.assertEqual(num_detections.shape.as_list(), [2]) return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(num_detections, [2, 3]) def test_batch_multiclass_nms_with_per_batch_clip_window(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) clip_window = np.array([0., 0., 200., 200.], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.95, .9, 0, 0], [.5, .3, 0, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [0, 0, 0, 0]]) def graph_fn(boxes, scores, clip_window): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, clip_window=clip_window) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) # Check static shapes self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) self.assertEqual(num_detections.shape.as_list(), [2]) return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(num_detections, [2, 
2]) def test_batch_multiclass_nms_with_per_image_clip_window(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) clip_window = np.array([[0., 0., 5., 5.], [0., 0., 200., 200.]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.9, 0., 0., 0.], [.5, .3, 0, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [0, 0, 0, 0]]) def graph_fn(boxes, scores, clip_window): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, clip_window=clip_window) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) # Check static shapes self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) self.assertEqual(num_detections.shape.as_list(), [2]) return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, clip_window]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(num_detections, [1, 2]) def test_batch_multiclass_nms_with_masks(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 999, 2, 1004], [0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.95, .9, 0, 0], [.85, .5, .3, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [1, 0, 0, 0]]) exp_nms_masks = np.array([[[[6, 7], [8, 9]], [[0, 1], [2, 3]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[13, 14], [15, 16]], [[8, 9], [10, 11]], [[10, 11], [12, 13]], [[0, 0], [0, 0]]]]) def graph_fn(boxes, scores, masks): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, 
num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, masks=masks) self.assertIsNone(nmsed_additional_fields) # Check static shapes self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) self.assertAllEqual(nmsed_masks.shape.as_list(), exp_nms_masks.shape) self.assertEqual(num_detections.shape.as_list(), [2]) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(num_detections, [2, 3]) self.assertAllClose(nmsed_masks, exp_nms_masks) def test_batch_multiclass_nms_with_additional_fields(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) keypoints = np.array( [[[[6, 7], [8, 9]], [[0, 1], [2, 3]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[13, 14], [15, 16]], [[8, 9], [10, 11]], [[10, 11], [12, 13]], [[0, 0], [0, 0]]]], np.float32) size = np.array( [[[[6], [8]], [[0], [2]], [[0], [0]], [[0], [0]]], [[[13], [15]], [[8], [10]], [[10], [12]], [[0], [0]]]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 999, 2, 1004], [0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.95, .9, 0, 0], [.85, .5, .3, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [1, 0, 0, 0]]) exp_nms_additional_fields = { 'keypoints': np.array([[[[0, 0], [0, 0]], [[6, 7], [8, 9]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[10, 11], [12, 13]], [[13, 14], [15, 16]], [[8, 9], [10, 11]], [[0, 0], [0, 0]]]]) } exp_nms_additional_fields['size'] = np.array([[[[0], [0]], [[6], [8]], [[0], [0]], [[0], [0]]], [[[10], [12]], [[13], [15]], [[8], [10]], [[0], [0]]]]) def graph_fn(boxes, scores, keypoints, size): additional_fields = {'keypoints': keypoints, 'size': size} (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, additional_fields=additional_fields) self.assertIsNone(nmsed_masks) # Check static shapes self.assertAllEqual(nmsed_boxes.shape.as_list(), exp_nms_corners.shape) self.assertAllEqual(nmsed_scores.shape.as_list(), exp_nms_scores.shape) self.assertAllEqual(nmsed_classes.shape.as_list(), exp_nms_classes.shape) self.assertEqual(len(nmsed_additional_fields), len(exp_nms_additional_fields)) for key in exp_nms_additional_fields: self.assertAllEqual(nmsed_additional_fields[key].shape.as_list(), exp_nms_additional_fields[key].shape) 
self.assertEqual(num_detections.shape.as_list(), [2]) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_additional_fields['keypoints'], nmsed_additional_fields['size'], num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, size]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(nmsed_keypoints, exp_nms_additional_fields['keypoints']) self.assertAllClose(nmsed_size, exp_nms_additional_fields['size']) self.assertAllClose(num_detections, [2, 3]) def test_batch_multiclass_nms_with_masks_and_num_valid_boxes(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) masks = np.array([[[[[0, 1], [2, 3]], [[1, 2], [3, 4]]], [[[2, 3], [4, 5]], [[3, 4], [5, 6]]], [[[4, 5], [6, 7]], [[5, 6], [7, 8]]], [[[6, 7], [8, 9]], [[7, 8], [9, 10]]]], [[[[8, 9], [10, 11]], [[9, 10], [11, 12]]], [[[10, 11], [12, 13]], [[11, 12], [13, 14]]], [[[12, 13], [14, 15]], [[13, 14], [15, 16]]], [[[14, 15], [16, 17]], [[15, 16], [17, 18]]]]], np.float32) num_valid_boxes = np.array([1, 1], np.int32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 10.1, 1, 11.1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] exp_nms_scores = [[.9, 0, 0, 0], [.5, 0, 0, 0]] exp_nms_classes = [[0, 0, 0, 0], [0, 0, 0, 0]] exp_nms_masks = [[[[0, 1], [2, 3]], [[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[8, 9], [10, 11]], [[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]]] def graph_fn(boxes, scores, masks, num_valid_boxes): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, masks=masks, num_valid_boxes=num_valid_boxes) self.assertIsNone(nmsed_additional_fields) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, masks, num_valid_boxes]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(num_detections, [1, 1]) self.assertAllClose(nmsed_masks, exp_nms_masks) def test_batch_multiclass_nms_with_additional_fields_and_num_valid_boxes( self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) keypoints = np.array( [[[[6, 7], 
[8, 9]], [[0, 1], [2, 3]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[13, 14], [15, 16]], [[8, 9], [10, 11]], [[10, 11], [12, 13]], [[0, 0], [0, 0]]]], np.float32) size = np.array( [[[[7], [9]], [[1], [3]], [[0], [0]], [[0], [0]]], [[[14], [16]], [[9], [11]], [[11], [13]], [[0], [0]]]], np.float32) num_valid_boxes = np.array([1, 1], np.int32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = [[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 10.1, 1, 11.1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] exp_nms_scores = [[.9, 0, 0, 0], [.5, 0, 0, 0]] exp_nms_classes = [[0, 0, 0, 0], [0, 0, 0, 0]] exp_nms_additional_fields = { 'keypoints': np.array([[[[6, 7], [8, 9]], [[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[13, 14], [15, 16]], [[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) } exp_nms_additional_fields['size'] = np.array([[[[7], [9]], [[0], [0]], [[0], [0]], [[0], [0]]], [[[14], [16]], [[0], [0]], [[0], [0]], [[0], [0]]]]) def graph_fn(boxes, scores, keypoints, size, num_valid_boxes): additional_fields = {'keypoints': keypoints, 'size': size} (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, num_valid_boxes=num_valid_boxes, additional_fields=additional_fields) self.assertIsNone(nmsed_masks) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_additional_fields['keypoints'], nmsed_additional_fields['size'], num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_keypoints, nmsed_size, num_detections) = self.execute_cpu(graph_fn, [boxes, scores, keypoints, size, num_valid_boxes]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertAllClose(nmsed_keypoints, exp_nms_additional_fields['keypoints']) self.assertAllClose(nmsed_size, exp_nms_additional_fields['size']) self.assertAllClose(num_detections, [1, 1]) def test_combined_nms_with_batch_size_2(self): """Test use_combined_nms.""" boxes = np.array([[[[0, 0, 0.1, 0.1], [0, 0, 0.1, 0.1]], [[0, 0.01, 1, 0.11], [0, 0.6, 0.1, 0.7]], [[0, -0.01, 0.1, 0.09], [0, -0.1, 0.1, 0.09]], [[0, 0.11, 0.1, 0.2], [0, 0.11, 0.1, 0.2]]], [[[0, 0, 0.2, 0.2], [0, 0, 0.2, 0.2]], [[0, 0.02, 0.2, 0.22], [0, 0.02, 0.2, 0.22]], [[0, -0.02, 0.2, 0.19], [0, -0.02, 0.2, 0.19]], [[0, 0.21, 0.2, 0.3], [0, 0.21, 0.2, 0.3]]]], np.float32) scores = np.array([[[.1, 0.9], [.75, 0.8], [.6, 0.3], [0.95, 0.1]], [[.1, 0.9], [.75, 0.8], [.6, .3], [.95, .1]]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 3 exp_nms_corners = np.array([[[0, 0.11, 0.1, 0.2], [0, 0, 0.1, 0.1], [0, 0.6, 0.1, 0.7]], [[0, 0.21, 0.2, 0.3], [0, 0, 0.2, 0.2], [0, 0.02, 0.2, 0.22]]]) exp_nms_scores = np.array([[.95, .9, 0.8], [.95, .9, .75]]) exp_nms_classes = np.array([[0, 1, 1], [0, 1, 0]]) def graph_fn(boxes, scores): (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, nmsed_additional_fields, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, max_total_size=max_output_size, use_static_shapes=True, use_combined_nms=True) self.assertIsNone(nmsed_masks) self.assertIsNone(nmsed_additional_fields) return (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) (nmsed_boxes, nmsed_scores, nmsed_classes, 
num_detections) = self.execute_cpu(graph_fn, [boxes, scores]) self.assertAllClose(nmsed_boxes, exp_nms_corners) self.assertAllClose(nmsed_scores, exp_nms_scores) self.assertAllClose(nmsed_classes, exp_nms_classes) self.assertListEqual(num_detections.tolist(), [3, 3]) def test_batch_multiclass_nms_with_use_static_shapes(self): boxes = np.array([[[[0, 0, 1, 1], [0, 0, 4, 5]], [[0, 0.1, 1, 1.1], [0, 0.1, 2, 1.1]], [[0, -0.1, 1, 0.9], [0, -0.1, 1, 0.9]], [[0, 10, 1, 11], [0, 10, 1, 11]]], [[[0, 10.1, 1, 11.1], [0, 10.1, 1, 11.1]], [[0, 100, 1, 101], [0, 100, 1, 101]], [[0, 1000, 1, 1002], [0, 999, 2, 1004]], [[0, 1000, 1, 1002.1], [0, 999, 2, 1002.7]]]], np.float32) scores = np.array([[[.9, 0.01], [.75, 0.05], [.6, 0.01], [.95, 0]], [[.5, 0.01], [.3, 0.01], [.01, .85], [.01, .5]]], np.float32) clip_window = np.array([[0., 0., 5., 5.], [0., 0., 200., 200.]], np.float32) score_thresh = 0.1 iou_thresh = .5 max_output_size = 4 exp_nms_corners = np.array([[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0], [0, 0, 0, 0]]]) exp_nms_scores = np.array([[.9, 0., 0., 0.], [.5, .3, 0, 0]]) exp_nms_classes = np.array([[0, 0, 0, 0], [0, 0, 0, 0]]) def graph_fn(boxes, scores, clip_window): (nmsed_boxes, nmsed_scores, nmsed_classes, _, _, num_detections ) = post_processing.batch_multiclass_non_max_suppression( boxes, scores, score_thresh, iou_thresh, max_size_per_class=max_output_size, clip_window=clip_window, use_static_shapes=True) return nmsed_boxes, nmsed_scores, nmsed_classes, num_detections (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = self.execute(graph_fn, [boxes, scores, clip_window]) for i in range(len(num_detections)): self.assertAllClose(nmsed_boxes[i, 0:num_detections[i]], exp_nms_corners[i, 0:num_detections[i]]) self.assertAllClose(nmsed_scores[i, 0:num_detections[i]], exp_nms_scores[i, 0:num_detections[i]]) self.assertAllClose(nmsed_classes[i, 0:num_detections[i]], exp_nms_classes[i, 0:num_detections[i]]) self.assertAllClose(num_detections, [1, 2]) if __name__ == '__main__': tf.test.main()
32,678
46.567686
97
py
models
models-master/research/object_detection/core/batcher_tf1_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.batcher.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from six.moves import range import tensorflow.compat.v1 as tf import tf_slim as slim from object_detection.core import batcher from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') class BatcherTest(tf.test.TestCase): def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self): with self.test_session() as sess: batch_size = 3 num_batches = 2 examples = tf.Variable(tf.constant(2, dtype=tf.int32)) counter = examples.count_up_to(num_batches * batch_size + 2) boxes = tf.tile( tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)])) batch_queue = batcher.BatchQueue( tensor_dict={'boxes': boxes}, batch_size=batch_size, batch_queue_capacity=100, num_batch_queue_threads=1, prefetch_queue_capacity=100) batch = batch_queue.dequeue() for tensor_dict in batch: for tensor in tensor_dict.values(): self.assertAllEqual([None, 4], tensor.get_shape().as_list()) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): i = 2 for _ in range(num_batches): batch_np = sess.run(batch) for tensor_dict in batch_np: for tensor in tensor_dict.values(): self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1))) i += 1 with self.assertRaises(tf.errors.OutOfRangeError): sess.run(batch) def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions( self): with self.test_session() as sess: batch_size = 3 num_batches = 2 examples = tf.Variable(tf.constant(2, dtype=tf.int32)) counter = examples.count_up_to(num_batches * batch_size + 2) image = tf.reshape( tf.range(counter * counter), tf.stack([counter, counter])) batch_queue = batcher.BatchQueue( tensor_dict={'image': image}, batch_size=batch_size, batch_queue_capacity=100, num_batch_queue_threads=1, prefetch_queue_capacity=100) batch = batch_queue.dequeue() for tensor_dict in batch: for tensor in tensor_dict.values(): self.assertAllEqual([None, None], tensor.get_shape().as_list()) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): i = 2 for _ in range(num_batches): batch_np = sess.run(batch) for tensor_dict in batch_np: for tensor in tensor_dict.values(): self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) i += 1 with self.assertRaises(tf.errors.OutOfRangeError): sess.run(batch) def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self): with self.test_session() as sess: batch_size = 3 num_batches = 2 examples = tf.Variable(tf.constant(1, dtype=tf.int32)) counter = examples.count_up_to(num_batches * batch_size + 1) image = tf.reshape(tf.range(1, 13), [4, 3]) * counter batch_queue = batcher.BatchQueue( tensor_dict={'image': image}, batch_size=batch_size, 
batch_queue_capacity=100, num_batch_queue_threads=1, prefetch_queue_capacity=100) batch = batch_queue.dequeue() for tensor_dict in batch: for tensor in tensor_dict.values(): self.assertAllEqual([4, 3], tensor.get_shape().as_list()) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): i = 1 for _ in range(num_batches): batch_np = sess.run(batch) for tensor_dict in batch_np: for tensor in tensor_dict.values(): self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i) i += 1 with self.assertRaises(tf.errors.OutOfRangeError): sess.run(batch) def test_batcher_when_batch_size_is_one(self): with self.test_session() as sess: batch_size = 1 num_batches = 2 examples = tf.Variable(tf.constant(2, dtype=tf.int32)) counter = examples.count_up_to(num_batches * batch_size + 2) image = tf.reshape( tf.range(counter * counter), tf.stack([counter, counter])) batch_queue = batcher.BatchQueue( tensor_dict={'image': image}, batch_size=batch_size, batch_queue_capacity=100, num_batch_queue_threads=1, prefetch_queue_capacity=100) batch = batch_queue.dequeue() for tensor_dict in batch: for tensor in tensor_dict.values(): self.assertAllEqual([None, None], tensor.get_shape().as_list()) tf.initialize_all_variables().run() with slim.queues.QueueRunners(sess): i = 2 for _ in range(num_batches): batch_np = sess.run(batch) for tensor_dict in batch_np: for tensor in tensor_dict.values(): self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i))) i += 1 with self.assertRaises(tf.errors.OutOfRangeError): sess.run(batch) if __name__ == '__main__': tf.test.main()
6,109
35.807229
80
py